From bbf275855c93b829636787143de7e4474a6e30bb Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 30 Nov 2024 20:08:53 +0100 Subject: [PATCH 001/105] dump --- dhfs-parent/objects/pom.xml | 99 +++++ .../com/usatiuk/dhfs/objects/DataLocker.java | 40 ++ .../java/com/usatiuk/dhfs/objects/JData.java | 11 + .../com/usatiuk/dhfs/objects/JObject.java | 11 + .../dhfs/objects/JObjectInterface.java | 9 + .../com/usatiuk/dhfs/objects/JObjectKey.java | 4 + .../usatiuk/dhfs/objects/JObjectManager.java | 90 +++++ .../com/usatiuk/dhfs/objects/LockWrapper.java | 60 +++ .../persistence/ObjectPersistentStore.java | 33 ++ .../SerializingFileObjectPersistentStore.java | 351 ++++++++++++++++++ .../dhfs/objects/persistence/TxManifest.java | 13 + .../com/usatiuk/dhfs/objects/ObjectsTest.java | 97 +++++ .../persistence/FakeObjectStorage.java | 80 ++++ .../usatiuk/dhfs/objects/test/objs/Kid.java | 18 + .../dhfs/objects/test/objs/KidData.java | 19 + .../dhfs/objects/test/objs/KidDataImpl.java | 28 ++ .../dhfs/objects/test/objs/Parent.java | 25 ++ .../dhfs/objects/test/objs/ParentData.java | 14 + .../objects/test/objs/ParentDataImpl.java | 41 ++ .../dhfs/objects/test/objs/TestData.java | 34 ++ dhfs-parent/pom.xml | 7 + dhfs-parent/server/pom.xml | 11 +- .../files/service/DhfsFileServiceImpl.java | 2 +- .../jkleppmanntree/JKleppmannTreeManager.java | 2 +- .../dhfs/objects/jrepository/JObject.java | 2 +- .../objects/jrepository/JObjectManager.java | 2 +- .../jrepository/JObjectManagerImpl.java | 2 +- .../jrepository/JObjectRefProcessor.java | 2 +- .../objects/jrepository/JObjectTxManager.java | 2 +- .../dhfs/objects/jrepository/TxWriteback.java | 2 +- .../objects/jrepository/TxWritebackImpl.java | 2 +- .../repository/PersistentPeerDataService.java | 2 +- .../repository/RemoteObjectServiceServer.java | 2 +- .../dhfs/objects/repository/SyncHandler.java | 2 +- .../autosync/AutoSyncProcessor.java | 2 +- .../DeferredInvalidationQueueService.java | 2 +- .../InvalidationQueueService.java | 2 +- .../repository/opsupport/OpSender.java | 2 +- .../FileObjectPersistentStore.java | 6 +- dhfs-parent/utils/pom.xml | 59 +++ .../com/usatiuk/dhfs}/utils/ByteUtils.java | 2 +- .../utils/HashSetDelayedBlockingQueue.java | 7 +- .../dhfs/utils}/SerializationHelper.java | 6 +- .../StatusRuntimeExceptionNoStacktrace.java | 2 +- .../java/com/usatiuk/dhfs}/utils/VoidFn.java | 2 +- .../HashSetDelayedBlockingQueueTest.java | 2 +- 46 files changed, 1183 insertions(+), 30 deletions(-) create mode 100644 dhfs-parent/objects/pom.xml create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java create mode 
100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java create mode 100644 dhfs-parent/utils/pom.xml rename dhfs-parent/{server/src/main/java/com/usatiuk => utils/src/main/java/com/usatiuk/dhfs}/utils/ByteUtils.java (93%) rename dhfs-parent/{server/src/main/java/com/usatiuk => utils/src/main/java/com/usatiuk/dhfs}/utils/HashSetDelayedBlockingQueue.java (98%) rename dhfs-parent/{server/src/main/java/com/usatiuk/dhfs => utils/src/main/java/com/usatiuk/dhfs/utils}/SerializationHelper.java (90%) rename dhfs-parent/{server/src/main/java/com/usatiuk => utils/src/main/java/com/usatiuk/dhfs}/utils/StatusRuntimeExceptionNoStacktrace.java (94%) rename dhfs-parent/{server/src/main/java/com/usatiuk => utils/src/main/java/com/usatiuk/dhfs}/utils/VoidFn.java (68%) rename dhfs-parent/{server/src/test/java/com/usatiuk => utils/src/test/java/com/usatiuk/dhfs}/utils/HashSetDelayedBlockingQueueTest.java (99%) diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml new file mode 100644 index 00000000..49970b92 --- /dev/null +++ b/dhfs-parent/objects/pom.xml @@ -0,0 +1,99 @@ + + + 4.0.0 + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + objects + + + 21 + 21 + UTF-8 + + + + + io.quarkus + quarkus-junit5 + test + + + io.quarkus + quarkus-arc + + + io.quarkus + quarkus-grpc + + + net.openhft + zero-allocation-hashing + + + org.projectlombok + lombok + provided + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.apache.commons + commons-lang3 + + + org.jboss.slf4j + slf4j-jboss-logmanager + test + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + supportlib + 1.0-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1C + false + classes + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + true + + + quarkus-plugin + + build + generate-code + generate-code-tests + + + + + + + + \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java new file mode 100644 index 00000000..8b45f4ab --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java @@ -0,0 +1,40 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; + +import java.lang.ref.Cleaner; +import java.lang.ref.WeakReference; +import java.util.concurrent.ConcurrentHashMap; + +@ApplicationScoped +public class DataLocker { + private final ConcurrentHashMap>> _locks = new ConcurrentHashMap<>(); + private final static Cleaner CLEANER = Cleaner.create(); + + public LockWrapper get(T data) 
{
+        while (true) {
+            var have = _locks.get(data.getKey());
+            if (have != null) {
+                var ret = have.get();
+                if (ret != null) {
+                    if (ret.sameObject(data)) {
+                        return (LockWrapper) ret;
+                    } else {
+                        Log.warn("Removed stale lock for " + data.getKey());
+                        _locks.remove(data.getKey(), have);
+                    }
+                }
+            }
+
+            var ret = new LockWrapper<>(data);
+            var ref = new WeakReference<>(ret);
+
+            if (_locks.putIfAbsent(data.getKey(), ref) == null) {
+                CLEANER.register(ret, () -> _locks.remove(data.getKey(), ref));
+                return ret;
+            }
+        }
+    }
+
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
new file mode 100644
index 00000000..608ea9f5
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.objects;
+
+import java.util.function.Function;
+
+public interface JData {
+    JObjectKey getKey();
+
+    JData bindCopy();
+
+    Function<JObjectInterface, JObject> binder();
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java
new file mode 100644
index 00000000..62f32be6
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.objects;
+
+public abstract class JObject {
+    protected final JObjectInterface _jObjectInterface;
+
+    public JObject(JObjectInterface jObjectInterface) {
+        _jObjectInterface = jObjectInterface;
+    }
+
+    public abstract JData getData();
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java
new file mode 100644
index 00000000..9860727e
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java
@@ -0,0 +1,9 @@
+package com.usatiuk.dhfs.objects;
+
+import java.util.Optional;
+
+public interface JObjectInterface {
+    Optional<JObject> getObject(JObjectKey key);
+
+    <T extends JObject> Optional<T> getObject(JObjectKey key, Class<T> type);
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java
new file mode 100644
index 00000000..9927eeee
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java
@@ -0,0 +1,4 @@
+package com.usatiuk.dhfs.objects;
+
+public record JObjectKey(String name) {
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
new file mode 100644
index 00000000..0a55315f
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -0,0 +1,90 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+@ApplicationScoped
+public class JObjectManager {
+    @Inject
+    ObjectPersistentStore objectStorage;
+
+    @Inject
+    DataLocker dataLocker;
+
+    public class Transaction implements JObjectInterface {
+        private final Map<JObjectKey, JObject> _objects = new HashMap<>();
+
+        private JObject dataToObject(JData data) {
+            return data.binder().apply(this);
+        }
+
+        @Override
+        public Optional<JObject> getObject(JObjectKey key) {
+            if (_objects.containsKey(key)) {
+                return Optional.of(_objects.get(key));
+            }
+
+            var data = objectStorage.readObject(key).orElse(null);
+            if (data == null) {
+                return Optional.empty();
+            }
+            var ret = dataToObject(data);
+            _objects.put(key, ret);
+            return Optional.of(ret);
+        }
+
+        @Override
+        public <T extends JObject> Optional<T> getObject(JObjectKey key, Class<T> type) {
+            if (_objects.containsKey(key)) {
+                var got = _objects.get(key);
+                if (type.isInstance(got)) {
+                    return Optional.of(type.cast(got));
+                } else {
+                    throw new IllegalArgumentException("Object type mismatch");
+                }
+            }
+
+            var data = objectStorage.readObject(key).orElse(null);
+            if (data == null) {
+                return Optional.empty();
+            }
+            var got = dataToObject(data);
+            if (type.isInstance(got)) {
+                _objects.put(key, got);
+                return Optional.of(type.cast(got));
+            } else {
+                throw new IllegalArgumentException("Object type mismatch");
+            }
+        }
+
+        public void commit() {
+            _objects.forEach((key, value) -> {
+                var data = (TestData) value.getData(); // FIXME: assumes the test data type; versioning belongs on JData
+
+                if (!data.isChanged()) {
+                    return;
+                }
+
+                var existing = objectStorage.readObject(key).orElse(null);
+                if (existing == null) {
+                    objectStorage.writeObject(key, data.copy());
+                    return;
+                }
+
+                if (((TestData) existing).getVersion() <= data.getVersion()) {
+                    objectStorage.writeObject(key, data.copy());
+                } else {
+                    throw new IllegalArgumentException("Version mismatch");
+                }
+            });
+        }
+    }
+
+    public Transaction beginTransaction() {
+        return new Transaction();
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java
new file mode 100644
index 00000000..4538a6aa
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java
@@ -0,0 +1,60 @@
+package com.usatiuk.dhfs.objects;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+public class LockWrapper<T extends JData> {
+    private final JData _data;
+    private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
+
+    public LockWrapper(T data) {
+        _data = data;
+    }
+
+    public boolean sameObject(JData data) {
+        return _data == data;
+    }
+
+    interface DataAccessor<T extends JData> extends AutoCloseable {
+        T getData();
+    }
+
+    public class ReadLocked<B extends JData> implements DataAccessor<B> {
+        public ReadLocked() {
+            _lock.readLock().lock();
+        }
+
+        @Override
+        public void close() {
+            _lock.readLock().unlock();
+        }
+
+        @Override
+        public B getData() {
+            return (B) _data;
+        }
+    }
+
+    public <B extends JData> ReadLocked<B> read() {
+        return new ReadLocked<>();
+    }
+
+    public class WriteLocked<B extends JData> implements DataAccessor<B> {
+        public WriteLocked() {
+            _lock.writeLock().lock();
+        }
+
+        @Override
+        public void close() {
+            _lock.writeLock().unlock();
+        }
+
+        @Override
+        public B getData() {
+            return (B) _data;
+        }
+    }
+
+    public <B extends JData> WriteLocked<B> write() {
+        return new WriteLocked<>();
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java
new file mode 100644
index 00000000..e03af1a4
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java
@@ -0,0 +1,33 @@
+package com.usatiuk.dhfs.objects.persistence;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObjectKey;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+import java.util.Optional;
+
+public interface ObjectPersistentStore {
+    @Nonnull
+    Collection<JObjectKey> findAllObjects();
+
+    @Nonnull
+    Optional<JData> readObject(JObjectKey name);
+
+    void
writeObjectDirect(JObjectKey name, JData object); + + void writeObject(JObjectKey name, JData object); + + + + void commitTx(TxManifest names); + + // Deletes object metadata and data + void deleteObjectDirect(JObjectKey name); + + long getTotalSpace(); + + long getFreeSpace(); + + long getUsableSpace(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java new file mode 100644 index 00000000..08b9decd --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java @@ -0,0 +1,351 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; +import com.usatiuk.dhfs.utils.ByteUtils; +import com.usatiuk.dhfs.utils.SerializationHelper; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +import io.grpc.Status; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import net.openhft.hashing.LongHashFunction; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import javax.annotation.Nonnull; +import java.io.*; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +// File format: +// 64-bit metadata serialized size +// 64-bit offset of "rest of" metadata (if -1 then file has no data, +// if 0 then file has data and metadata fits into META_BLOCK_SIZE) +// Until META_BLOCK_SIZE - metadata (encoded as ObjectMetadataP) +// data (encoded as JObjectDataP) +// rest of metadata + +@ApplicationScoped +public class SerializingFileObjectPersistentStore implements ObjectPersistentStore { + private final Path _root; + private final Path _txManifest; + private ExecutorService _flushExecutor; + private RandomAccessFile _txFile; + private volatile boolean _ready = false; + + public SerializingFileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { + this._root = Path.of(root).resolve("objects"); + _txManifest = Path.of(root).resolve("cur-tx-manifest"); + } + + void init(@Observes @Priority(100) StartupEvent event) throws IOException { + if (!_root.toFile().exists()) { + Log.info("Initializing with root " + _root); + _root.toFile().mkdirs(); + for (int i = 0; i < 256; i++) { + _root.resolve(String.valueOf(i)).toFile().mkdirs(); + } + } + if (!Files.exists(_txManifest)) { + Files.createFile(_txManifest); + } + _txFile = new RandomAccessFile(_txManifest.toFile(), "rw"); + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("persistent-commit-%d") + .build(); + + 
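+            // Commit fan-out pool: commitTxImpl() moves/deletes the per-object files on these threads in parallel, joining on a CountDownLatch.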
_flushExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); + } + + tryReplay(); + Log.info("Transaction replay done"); + _ready = true; + } + + void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException { + _ready = false; + Log.debug("Deleting manifest file"); + _txFile.close(); + Files.delete(_txManifest); + Log.debug("Manifest file deleted"); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Wrong service order!"); + } + + private void tryReplay() { + var read = readTxManifest(); + if (read != null) + commitTxImpl(read, false); + } + + private Path getObjPath(@Nonnull JObjectKey obj) { + int h = Objects.hash(obj); + int p1 = h & 0b00000000_00000000_11111111_00000000; + return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj.toString()); + } + + private Path getTmpObjPath(@Nonnull JObjectKey obj) { + int h = Objects.hash(obj); + int p1 = h & 0b00000000_00000000_11111111_00000000; + return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj + ".tmp"); + } + + private void findAllObjectsImpl(Collection out, Path path) { + var read = path.toFile().listFiles(); + if (read == null) return; + + for (var s : read) { + if (s.isDirectory()) { + findAllObjectsImpl(out, s.toPath()); + } else { + if (s.getName().endsWith(".tmp")) continue; // FIXME: + out.add(new JObjectKey(s.getName())); // FIXME: + } + } + } + + @Nonnull + @Override + public Collection findAllObjects() { + verifyReady(); + ArrayList out = new ArrayList<>(); + findAllObjectsImpl(out, _root); + return Collections.unmodifiableCollection(out); + } + + @Nonnull + @Override + public Optional readObject(JObjectKey name) { + verifyReady(); + var path = getObjPath(name); + try (var rf = new RandomAccessFile(path.toFile(), "r")) { + ByteBuffer buf = UninitializedByteBuffer.allocateUninitialized(Math.toIntExact(rf.getChannel().size())); + fillBuffer(buf, rf.getChannel()); + buf.flip(); + + var bs = UnsafeByteOperations.unsafeWrap(buf); + // This way, the input will be considered "immutable" which would allow avoiding copies + // when parsing byte arrays + var ch = bs.newCodedInput(); + ch.enableAliasing(true); +// return JObjectDataP.parseFrom(ch); + return null; + } catch (EOFException | FileNotFoundException | NoSuchFileException fx) { + return Optional.empty(); + } catch (IOException e) { + Log.error("Error reading file " + path, e); + throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); + } + } + + private void fillBuffer(ByteBuffer dst, FileChannel src) throws IOException { + int rem = dst.remaining(); + int readTotal = 0; + int readCur = 0; + while (readTotal < rem && (readCur = src.read(dst)) != -1) { + readTotal += readCur; + } + if (rem != readTotal) + throw new EOFException(); + } + + private void writeObjectImpl(Path path, JData data, boolean sync) throws IOException { + try (var fsb = new FileOutputStream(path.toFile(), false)) { +// int dataSize = data.getSerializedSize(); + int dataSize = 0; + +// if (fsb.getChannel().write(metaBb.limit(META_BLOCK_SIZE)) != META_BLOCK_SIZE) +// throw new IOException("Could not write to file"); + + if (sync) { + fsb.flush(); + fsb.getFD().sync(); + } + } + } + + @Override + public void writeObjectDirect(JObjectKey name, JData data) { + verifyReady(); + try { + var path = getObjPath(name); + writeObjectImpl(path, data, false); + } catch (IOException e) { + Log.error("Error writing file " + name, e); + throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); + } + } + + @Override + 
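+    // Staged write: data goes to a "<name>.tmp" file and is fsynced; commitTx() later moves it into place atomically, so a crash mid-write never clobbers the old object.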
public void writeObject(JObjectKey name, JData obj) {
+        verifyReady();
+        try {
+            var tmpPath = getTmpObjPath(name);
+            writeObjectImpl(tmpPath, obj, true);
+        } catch (IOException e) {
+            Log.error("Error writing new file " + name, e);
+            throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); // Surface the failure like writeObjectDirect does instead of swallowing it
+        }
+    }
+
+
+    private TxManifest readTxManifest() {
+        try {
+            var channel = _txFile.getChannel();
+
+            if (channel.size() == 0)
+                return null;
+
+            channel.position(0);
+
+            var buf = ByteBuffer.allocate(Math.toIntExact(channel.size()));
+
+            fillBuffer(buf, channel);
+            buf.flip();
+
+            long checksum = buf.getLong();
+            var data = buf.slice();
+            var hash = LongHashFunction.xx3().hashBytes(data);
+
+            if (hash != checksum)
+                throw new StatusRuntimeExceptionNoStacktrace(Status.DATA_LOSS.withDescription("Transaction manifest checksum mismatch!"));
+
+            return SerializationHelper.deserialize(data.array(), data.arrayOffset());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void putTxManifest(TxManifest manifest) {
+        try {
+            var channel = _txFile.getChannel();
+            var data = SerializationHelper.serializeArray(manifest);
+            channel.truncate(data.length + 8);
+            channel.position(0);
+            var hash = LongHashFunction.xx3().hashBytes(data);
+            if (channel.write(ByteUtils.longToBb(hash)) != 8)
+                throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL);
+            if (channel.write(ByteBuffer.wrap(data)) != data.length)
+                throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL);
+            channel.force(true);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public void commitTx(TxManifest manifest) {
+        verifyReady();
+        commitTxImpl(manifest, true);
+    }
+
+    public void commitTxImpl(TxManifest manifest, boolean failIfNotFound) {
+        try {
+            if (manifest.getDeleted().isEmpty() && manifest.getWritten().isEmpty()) {
+                Log.debug("Empty manifest, skipping");
+                return;
+            }
+
+            putTxManifest(manifest);
+
+            var latch = new CountDownLatch(manifest.getWritten().size() + manifest.getDeleted().size());
+            ConcurrentLinkedQueue<Throwable> errors = new ConcurrentLinkedQueue<>();
+
+            for (var n : manifest.getWritten()) {
+                _flushExecutor.execute(() -> {
+                    try {
+                        Files.move(getTmpObjPath(n), getObjPath(n), ATOMIC_MOVE, REPLACE_EXISTING);
+                    } catch (Throwable t) {
+                        if (!failIfNotFound && (t instanceof NoSuchFileException)) return;
+                        Log.error("Error writing " + n, t);
+                        errors.add(t);
+                    } finally {
+                        latch.countDown();
+                    }
+                });
+            }
+            for (var d : manifest.getDeleted()) {
+                _flushExecutor.execute(() -> {
+                    try {
+                        deleteImpl(getObjPath(d));
+                    } catch (Throwable t) {
+                        Log.error("Error deleting " + d, t);
+                        errors.add(t);
+                    } finally {
+                        latch.countDown();
+                    }
+                });
+            }
+
+            latch.await();
+
+            if (!errors.isEmpty()) {
+                throw new RuntimeException("Errors when committing tx!");
+            }
+
+            // No real need to truncate here
+//            try (var channel = _txFile.getChannel()) {
+//                channel.truncate(0);
+//            }
+//        } catch (IOException e) {
+//            Log.error("Failed committing transaction to disk: ", e);
+//            throw new RuntimeException(e);
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void deleteImpl(Path path) {
+        try {
+            Files.delete(path);
+        } catch (NoSuchFileException ignored) {
+        } catch (IOException e) {
+            Log.error("Error deleting file " + path, e);
+            throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL);
+        }
+    }
+
+    @Override
+    public void deleteObjectDirect(JObjectKey name) {
+        verifyReady();
+        deleteImpl(getObjPath(name));
+    }
+
+    @Override
+    public long getTotalSpace() {
+        verifyReady();
+        return
_root.toFile().getTotalSpace(); + } + + @Override + public long getFreeSpace() { + verifyReady(); + return _root.toFile().getFreeSpace(); + } + + @Override + public long getUsableSpace() { + verifyReady(); + return _root.toFile().getUsableSpace(); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java new file mode 100644 index 00000000..3a91f71e --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java @@ -0,0 +1,13 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.JObjectKey; + +import java.io.Serializable; +import java.util.List; + +// FIXME: Serializable +public interface TxManifest extends Serializable { + List getWritten(); + + List getDeleted(); +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java new file mode 100644 index 00000000..6a65b186 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -0,0 +1,97 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.FakeObjectStorage; +import com.usatiuk.dhfs.objects.test.objs.Kid; +import com.usatiuk.dhfs.objects.test.objs.Parent; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class ObjectsTest { + private final FakeObjectStorage _storage = new FakeObjectStorage(); + private final JObjectManager _tx = new JObjectManager(_storage); + + @Test + void createObject() { + { + var tx = _tx.beginTransaction(); + var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); + parent.setName("John"); + tx.commit(); + } + + { + var tx2 = _tx.beginTransaction(); + var parent = tx2.getObject(new JObjectKey("Parent")); + Assertions.assertInstanceOf(Parent.class, parent); + Assertions.assertEquals("John", ((Parent) parent).getName()); + } + } + + @Test + void createObjectConflict() { + { + var tx = _tx.beginTransaction(); + var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); + parent.setName("John"); + + var tx2 = _tx.beginTransaction(); + var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); + parent2.setName("John"); + + tx.commit(); + Assertions.assertThrows(Exception.class, tx2::commit); + } + } + + @Test + void editConflict() { + { + var tx = _tx.beginTransaction(); + var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); + parent.setName("John"); + tx.commit(); + } + + { + var tx = _tx.beginTransaction(); + var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); + parent.setName("John2"); + + var tx2 = _tx.beginTransaction(); + var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); + parent2.setName("John3"); + + tx.commit(); + Assertions.assertThrows(Exception.class, tx2::commit); + } + + { + var tx2 = _tx.beginTransaction(); + var parent = tx2.getObject(new JObjectKey("Parent")); + Assertions.assertInstanceOf(Parent.class, parent); + Assertions.assertEquals("John2", ((Parent) parent).getName()); + } + } + + @Test + void nestedCreate() { + { + var tx = _tx.beginTransaction(); + var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); + var kid = tx.getObject(new JObjectKey("Kid"), Kid.class); + parent.setName("John"); + kid.setName("KidName"); + parent.setKidKey(kid.getKey()); + tx.commit(); + } + + { + var tx2 = 
_tx.beginTransaction();
+            var parent = tx2.getObject(new JObjectKey("Parent"));
+            Assertions.assertInstanceOf(Parent.class, parent);
+            Assertions.assertEquals("John", ((Parent) parent).getName());
+            Assertions.assertEquals("KidName", ((Parent) parent).getKid().getName());
+        }
+    }
+
+}
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java
new file mode 100644
index 00000000..5566f1f5
--- /dev/null
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java
@@ -0,0 +1,80 @@
+package com.usatiuk.dhfs.objects.persistence;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.test.objs.TestData;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+public class FakeObjectStorage implements ObjectPersistentStore {
+    private final Map<JObjectKey, TestData> _objects = new HashMap<>();
+    private final Map<JObjectKey, TestData> _pending = new HashMap<>();
+
+    @Nonnull
+    @Override
+    public Collection<JObjectKey> findAllObjects() {
+        synchronized (this) {
+            return _objects.keySet();
+        }
+    }
+
+    @Nonnull
+    @Override
+    public Optional<JData> readObject(JObjectKey name) {
+        synchronized (this) {
+            return Optional.ofNullable(_objects.get(name));
+        }
+    }
+
+    @Override
+    public void writeObjectDirect(JObjectKey name, JData object) {
+        synchronized (this) {
+            _objects.put(name, (TestData) object);
+        }
+    }
+
+    @Override
+    public void writeObject(JObjectKey name, JData object) {
+        synchronized (this) {
+            _pending.put(name, (TestData) object);
+        }
+    }
+
+    @Override
+    public void commitTx(TxManifest names) {
+        synchronized (this) {
+            for (JObjectKey key : names.getWritten()) {
+                _objects.put(key, _pending.get(key));
+            }
+            for (JObjectKey key : names.getDeleted()) {
+                _objects.remove(key);
+            }
+        }
+    }
+
+    @Override
+    public void deleteObjectDirect(JObjectKey name) {
+        synchronized (this) {
+            _objects.remove(name);
+        }
+    }
+
+    @Override
+    public long getTotalSpace() {
+        return 0;
+    }
+
+    @Override
+    public long getFreeSpace() {
+        return 0;
+    }
+
+    @Override
+    public long getUsableSpace() {
+        return 0;
+    }
+}
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
new file mode 100644
index 00000000..d5fb404d
--- /dev/null
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
@@ -0,0 +1,18 @@
+package com.usatiuk.dhfs.objects.test.objs;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObject;
+import com.usatiuk.dhfs.objects.JObjectInterface;
+import lombok.experimental.Delegate;
+
+public class Kid extends JObject {
+    @Delegate
+    private final KidData _data; // JObject's constructor only takes the interface, so keep the data here (mirrors Parent)
+
+    public Kid(JObjectInterface jObjectInterface, KidData data) {
+        super(jObjectInterface);
+        _data = data;
+    }
+
+    @Override
+    public JData getData() {
+        return _data;
+    }
+
+}
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
new file mode 100644
index 00000000..e519803e
--- /dev/null
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
@@ -0,0 +1,19 @@
+package com.usatiuk.dhfs.objects.test.objs;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObject;
+import com.usatiuk.dhfs.objects.JObjectInterface;
+
+import java.util.function.Function; + +public interface KidData extends JData { + String getName(); + + void setName(String name); + + KidData bindCopy(); + + default Function binder() { + return jo -> new Kid(jo, bindCopy()); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java new file mode 100644 index 00000000..48b8baf6 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java @@ -0,0 +1,28 @@ +package com.usatiuk.dhfs.objects.test.objs; + +import com.usatiuk.dhfs.objects.JObjectKey; + +public class KidDataImpl extends TestData implements KidData { + private String _name; + + public KidDataImpl(long version, JObjectKey key, String name) { + super(version, key); + _name = name; + } + + @Override + public String getName() { + return _name; + } + + @Override + public void setName(String name) { + _name = name; + onChanged(); + } + + @Override + public KidDataImpl copy() { + return new KidDataImpl(isChanged() ? getVersion() + 1 : getVersion(), getKey(), _name); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java new file mode 100644 index 00000000..176c983f --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.test.objs; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObject; +import com.usatiuk.dhfs.objects.JObjectInterface; +import lombok.experimental.Delegate; + +public class Parent extends JObject { + @Delegate + private final ParentData _data; + + public Parent(JObjectInterface jObjectInterface, ParentData data) { + super(jObjectInterface); + _data = data; + } + + @Override + public JData getData() { + return _data; + } + + public Kid getKid() { + return _jObjectInterface.getObject(_data.getKidKey(), Kid.class); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java new file mode 100644 index 00000000..b3f0e76f --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java @@ -0,0 +1,14 @@ +package com.usatiuk.dhfs.objects.test.objs; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public interface ParentData extends JData { + String getName(); + + void setName(String name); + + JObjectKey getKidKey(); + + void setKidKey(JObjectKey kid); +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java new file mode 100644 index 00000000..c77a0020 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java @@ -0,0 +1,41 @@ +package com.usatiuk.dhfs.objects.test.objs; + +import com.usatiuk.dhfs.objects.JObjectKey; + +public class ParentDataImpl extends TestData implements ParentData { + private String _name; + private JObjectKey _kidKey; + + public ParentDataImpl(long version, JObjectKey key, String name, JObjectKey kidKey) { + super(version, key); + _name = name; + _kidKey = kidKey; + } + + @Override + public String getName() { + return _name; + } + + 
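+    // Mutators call onChanged() so that copy() bumps the version only for objects that were actually modified.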
@Override + public void setName(String name) { + _name = name; + onChanged(); + } + + @Override + public JObjectKey getKidKey() { + return _kidKey; + } + + @Override + public void setKidKey(JObjectKey kid) { + _kidKey = kid; + onChanged(); + } + + @Override + public ParentDataImpl copy() { + return new ParentDataImpl(isChanged() ? getVersion() + 1 : getVersion(), getKey(), _name, _kidKey); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java new file mode 100644 index 00000000..0bf25df1 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java @@ -0,0 +1,34 @@ +package com.usatiuk.dhfs.objects.test.objs; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public abstract class TestData implements JData { + private boolean _changed = false; + private final long _version; + private final JObjectKey _key; + + protected TestData(long version, JObjectKey key) { + _version = version; + _key = key; + } + + void onChanged() { + _changed = true; + } + + public boolean isChanged() { + return _changed; + } + + public long getVersion() { + return _version; + } + + @Override + public JObjectKey getKey() { + return _key; + } + + public abstract TestData copy(); +} diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 8597d81c..3140d94e 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -15,6 +15,8 @@ kleppmanntree supportlib autoprotomap + objects + utils @@ -54,6 +56,11 @@ 1.18.34 provided + + net.openhft + zero-allocation-hashing + 0.16 + org.awaitility awaitility diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index d9e34d5d..bb74c72a 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -51,7 +51,6 @@ net.openhft zero-allocation-hashing - 0.16 io.quarkus @@ -147,6 +146,16 @@ supportlib 1.0-SNAPSHOT + + com.usatiuk.dhfs + objects + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 08bf639f..33b30d85 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -15,7 +15,7 @@ import com.usatiuk.dhfs.objects.jrepository.JObject; import com.usatiuk.dhfs.objects.jrepository.JObjectManager; import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 43502d20..2743bf48 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -9,7 +9,7 @@ import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; import 
com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; import com.usatiuk.kleppmanntree.*; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java index b21a9ece..1d0a9ca0 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; public abstract class JObject { public abstract ObjectMetadata getMeta(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java index 5c7ac28f..377c9533 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import jakarta.annotation.Nullable; import java.util.Collection; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java index 5a24c1e7..5cd3e2ce 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java @@ -7,7 +7,7 @@ import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java index 66914cca..5de25357 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jrepository; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java index 3d8282aa..3634f3a2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java @@ -4,7 +4,7 @@ import com.usatiuk.autoprotomap.runtime.ProtoSerializer; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import jakarta.annotation.Nullable; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java index 14c6146f..70a4e60e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.jrepository; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; public interface TxWriteback { TxBundle createBundle(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java index db5e7119..ab1b1440 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jrepository; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.utils.VoidFn; +import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 05f8f66f..0413d8b8 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.repository; -import com.usatiuk.dhfs.SerializationHelper; +import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.ShutdownChecker; import com.usatiuk.dhfs.objects.jrepository.*; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index fde49ecb..17b9bb22 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -10,7 +10,7 @@ import 
com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.opsupport.Op; import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.grpc.GrpcService; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index cc88f97d..136041a8 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -8,7 +8,7 @@ import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java index fcc5d702..0220c443 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.repository.autosync; import com.usatiuk.dhfs.objects.jrepository.*; import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.Startup; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java index 575f65dc..e62e4d19 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.SerializationHelper; +import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.objects.repository.PeerManager; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java index f754457c..b5424c28 100644 --- 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java @@ -6,7 +6,7 @@ import com.usatiuk.dhfs.objects.jrepository.JObjectManager; import com.usatiuk.dhfs.objects.repository.PeerManager; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java index 9cd68547..3bf3b647 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.repository.opsupport; import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; import com.usatiuk.dhfs.objects.repository.PeerManager; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import com.usatiuk.utils.HashSetDelayedBlockingQueue; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.Startup; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java index 3dd12370..493a8323 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java @@ -3,13 +3,13 @@ package com.usatiuk.dhfs.objects.repository.persistence; import com.google.protobuf.ByteString; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.SerializationHelper; +import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.objects.persistence.JObjectDataP; import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; import com.usatiuk.dhfs.supportlib.DhfsSupport; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; -import com.usatiuk.utils.ByteUtils; -import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace; +import com.usatiuk.dhfs.utils.ByteUtils; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/utils/pom.xml b/dhfs-parent/utils/pom.xml new file mode 100644 index 00000000..30db029a --- /dev/null +++ b/dhfs-parent/utils/pom.xml @@ -0,0 +1,59 @@ + + + 4.0.0 + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + utils + + + 21 + 21 + UTF-8 + + + + + io.quarkus + quarkus-junit5 + test + + + io.quarkus + quarkus-arc + + + io.quarkus + quarkus-grpc + + + org.projectlombok + lombok + provided + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.apache.commons + commons-lang3 + + + org.jboss.slf4j + slf4j-jboss-logmanager + test + 
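+        <!-- commons-io supplies the ClassLoaderObjectInputStream used by SerializationHelper -->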
+ + commons-io + commons-io + + + + \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java similarity index 93% rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java index f7075b40..dba58508 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/ByteUtils.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/ByteUtils.java @@ -1,4 +1,4 @@ -package com.usatiuk.utils; +package com.usatiuk.dhfs.utils; import java.nio.ByteBuffer; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java similarity index 98% rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java index 51d23509..628bf4fd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/HashSetDelayedBlockingQueue.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java @@ -1,4 +1,4 @@ -package com.usatiuk.utils; +package com.usatiuk.dhfs.utils; import jakarta.annotation.Nullable; import lombok.Getter; @@ -11,9 +11,12 @@ import java.util.function.Function; public class HashSetDelayedBlockingQueue { private final LinkedHashMap> _set = new LinkedHashMap<>(); private final Object _sleepSynchronizer = new Object(); - @Getter private long _delay; + public long getDelay() { + return _delay; + } + private boolean _closed = false; public HashSetDelayedBlockingQueue(long delay) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java similarity index 90% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java index 977b2307..d285a821 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/SerializationHelper.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/SerializationHelper.java @@ -1,8 +1,7 @@ -package com.usatiuk.dhfs; +package com.usatiuk.dhfs.utils; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.files.objects.File; import org.apache.commons.io.input.ClassLoaderObjectInputStream; import org.apache.commons.lang3.SerializationUtils; @@ -12,10 +11,9 @@ import java.io.InputStream; import java.io.Serializable; public abstract class SerializationHelper { - // Taken from SerializationUtils public static T deserialize(final InputStream inputStream) { - try (ClassLoaderObjectInputStream in = new ClassLoaderObjectInputStream(File.class.getClassLoader(), inputStream)) { + try (ClassLoaderObjectInputStream in = new ClassLoaderObjectInputStream(SerializationHelper.class.getClassLoader(), inputStream)) { final T obj = (T) in.readObject(); return obj; } catch (IOException | ClassNotFoundException e) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java similarity index 94% rename from 
dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java index 963da69d..40897edc 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/StatusRuntimeExceptionNoStacktrace.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/StatusRuntimeExceptionNoStacktrace.java @@ -1,4 +1,4 @@ -package com.usatiuk.utils; +package com.usatiuk.dhfs.utils; import io.grpc.Metadata; import io.grpc.Status; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java similarity index 68% rename from dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java rename to dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java index 46f4ff0c..e20d6707 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/utils/VoidFn.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFn.java @@ -1,4 +1,4 @@ -package com.usatiuk.utils; +package com.usatiuk.dhfs.utils; @FunctionalInterface public interface VoidFn { diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java b/dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java similarity index 99% rename from dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java rename to dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java index d68998cf..70f36cc9 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/utils/HashSetDelayedBlockingQueueTest.java +++ b/dhfs-parent/utils/src/test/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueueTest.java @@ -1,4 +1,4 @@ -package com.usatiuk.utils; +package com.usatiuk.dhfs.utils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; From 094a3e5e76e8f237dcb0261835b45cc0c67f5bd9 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 1 Dec 2024 23:29:13 +0100 Subject: [PATCH 002/105] dump --- dhfs-parent/objects/src/lombok.config | 1 + .../com/usatiuk/dhfs/objects/DataLocker.java | 40 -- .../java/com/usatiuk/dhfs/objects/JData.java | 9 +- .../com/usatiuk/dhfs/objects/JObject.java | 11 - .../dhfs/objects/JObjectInterface.java | 9 - .../usatiuk/dhfs/objects/JObjectManager.java | 227 +++++++--- .../com/usatiuk/dhfs/objects/LockWrapper.java | 60 --- .../usatiuk/dhfs/objects/ObjectAllocator.java | 16 + .../dhfs/objects/ObjectSerializer.java | 9 + .../dhfs/objects/TransactionManager.java | 13 + .../dhfs/objects/TransactionManagerImpl.java | 45 ++ .../com/usatiuk/dhfs/objects/TxBundle.java | 9 + .../com/usatiuk/dhfs/objects/TxWriteback.java | 17 + .../usatiuk/dhfs/objects/TxWritebackImpl.java | 415 ++++++++++++++++++ ...re.java => FileObjectPersistentStore.java} | 44 +- .../persistence/ObjectPersistentStore.java | 14 +- .../objects/transaction/LockingStrategy.java | 7 + .../dhfs/objects/transaction/Transaction.java | 17 + .../transaction/TransactionFactory.java | 5 + .../transaction/TransactionFactoryImpl.java | 96 ++++ .../transaction/TransactionObjectSource.java | 17 + .../transaction/TransactionPrivate.java | 9 + .../dhfs/objects/transaction/TxRecord.java | 73 +++ .../persistence/FakeObjectStorage.java | 7 - .../usatiuk/dhfs/objects/test/objs/Kid.java | 6 +- .../dhfs/objects/test/objs/KidData.java | 4 +- .../dhfs/objects/test/objs/Parent.java | 6 +- .../com/usatiuk/dhfs/utils/DataLocker.java | 53 
+++ 28 files changed, 987 insertions(+), 252 deletions(-) create mode 100644 dhfs-parent/objects/src/lombok.config delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java rename dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/{SerializingFileObjectPersistentStore.java => FileObjectPersistentStore.java} (88%) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java create mode 100644 dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java diff --git a/dhfs-parent/objects/src/lombok.config b/dhfs-parent/objects/src/lombok.config new file mode 100644 index 00000000..f1c474ce --- /dev/null +++ b/dhfs-parent/objects/src/lombok.config @@ -0,0 +1 @@ +lombok.accessors.prefix += _ diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java deleted file mode 100644 index 8b45f4ab..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/DataLocker.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; - -import java.lang.ref.Cleaner; -import java.lang.ref.WeakReference; -import java.util.concurrent.ConcurrentHashMap; - -@ApplicationScoped -public class DataLocker { - private final ConcurrentHashMap>> _locks = new ConcurrentHashMap<>(); - private final static Cleaner CLEANER = Cleaner.create(); - - public LockWrapper get(T data) { - while (true) { - var have = _locks.get(data.getKey()); - if (have != null) { - var ret = have.get(); - if (ret != null) { - if (ret.sameObject(data)) { - return (LockWrapper) ret; - } else { - Log.warn("Removed stale lock for " + data.getKey()); - _locks.remove(data.getKey(), have); - } - } - } - - 
var ret = new LockWrapper<>(data);
-            var ref = new WeakReference<>(ret);
-
-            if (_locks.putIfAbsent(data.getKey(), ref) == null) {
-                CLEANER.register(ret, () -> _locks.remove(data.getKey(), ref));
-                return ret;
-            }
-        }
-    }
-
-}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
index 608ea9f5..f032b27b 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java
@@ -1,11 +1,8 @@
 package com.usatiuk.dhfs.objects;
 
-import java.util.function.Function;
-
+// The base class for JObject data.
+// Only one instance of it exists per key; the instance kept in the manager is canonical.
+// When committing a transaction, the instance is checked against the canonical one; if they differ, a race occurred.
 public interface JData {
     JObjectKey getKey();
-
-    JData bindCopy();
-
-    Function binder();
 }
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java
deleted file mode 100644
index 62f32be6..00000000
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObject.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.usatiuk.dhfs.objects;
-
-public abstract class JObject {
-    protected final JObjectInterface _jObjectInterface;
-
-    public JObject(JObjectInterface jObjectInterface) {
-        _jObjectInterface = jObjectInterface;
-    }
-
-    public abstract JData getData();
-}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java
deleted file mode 100644
index 9860727e..00000000
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectInterface.java
+++ /dev/null
@@ -1,9 +0,0 @@
-package com.usatiuk.dhfs.objects;
-
-import java.util.Optional;
-
-public interface JObjectInterface {
-    Optional getObject(JObjectKey key);
-
-    Optional getObject(JObjectKey key, Class type);
-}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index 0a55315f..d5c1aca6 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -1,90 +1,181 @@
 package com.usatiuk.dhfs.objects;
 
 import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore;
+import com.usatiuk.dhfs.objects.transaction.TransactionFactory;
+import com.usatiuk.dhfs.objects.transaction.TransactionObjectSource;
+import com.usatiuk.dhfs.objects.transaction.TransactionPrivate;
+import com.usatiuk.dhfs.objects.transaction.TxRecord;
+import com.usatiuk.dhfs.utils.DataLocker;
+import com.usatiuk.dhfs.utils.VoidFn;
+import io.quarkus.logging.Log;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
+import org.apache.commons.lang3.tuple.Pair;
 
-import java.util.HashMap;
-import java.util.Map;
+import java.lang.ref.Cleaner;
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+// Manages all access to JData objects.
+// In particular, it serves as a source of truth for what is committed to the backing storage. +// All data goes through it, it is responsible for transaction atomicity @ApplicationScoped public class JObjectManager { @Inject ObjectPersistentStore objectStorage; - @Inject - DataLocker dataLocker; + ObjectSerializer objectSerializer; + @Inject + ObjectAllocator objectAllocator; + @Inject + TransactionFactory transactionFactory; - public class Transaction implements JObjectInterface { - private final Map _objects = new HashMap<>(); + private final DataLocker _storageReadLocker = new DataLocker(); + private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); + private final AtomicLong _txCounter = new AtomicLong(); - private JObject dataToObject(JData data) { - return data.binder().apply(this); - } + private class JDataWrapper extends WeakReference { + private static final Cleaner CLEANER = Cleaner.create(); - @Override - public Optional getObject(JObjectKey key) { - if (_objects.containsKey(key)) { - return Optional.of(_objects.get(key)); - } + final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + long lastWriteTx = 0; - var data = objectStorage.readObject(key).orElse(null); - if (data == null) { - return Optional.empty(); - } - var ret = dataToObject(data); - _objects.put(key, ret); - return Optional.of(ret); - } - - @Override - public Optional getObject(JObjectKey key, Class type) { - if (_objects.containsKey(key)) { - var got = _objects.get(key); - if (type.isInstance(got)) { - return Optional.of(type.cast(got)); - } else { - throw new IllegalArgumentException("Object type mismatch"); - } - } - - var data = objectStorage.readObject(key).orElse(null); - if (data == null) { - return Optional.empty(); - } - var got = dataToObject(data); - if (type.isInstance(got)) { - _objects.put(key, got); - return Optional.of(type.cast(got)); - } else { - throw new IllegalArgumentException("Object type mismatch"); - } - } - - public void commit() { - _objects.forEach((key, value) -> { - var data = (TestData) value.getData(); - - if (!data.isChanged()) { - return; - } - - if (_objectStorage.get(key) == null) { - _objectStorage.put(data.copy()); - return; - } - - if (_objectStorage.get(key).getVersion() <= data.getVersion()) { - _objectStorage.put(data.copy()); - } else { - throw new IllegalArgumentException("Version mismatch"); - } + public JDataWrapper(T referent) { + super(referent); + var key = referent.getKey(); + CLEANER.register(referent, () -> { + _objects.remove(key, this); }); } } - public Transaction beginTransaction() { - return new Transaction(); + private Pair> get(Class type, JObjectKey key) { + while (true) { + { + var got = _objects.get(key); + + if (got != null) { + var ref = got.get(); + if (type.isInstance(ref)) { + return Pair.of(type.cast(ref), (JDataWrapper) got); + } else if (ref == null) { + _objects.remove(key, got); + } else { + throw new IllegalArgumentException("Object type mismatch"); + } + } + } + + //noinspection unused + try (var readLock = _storageReadLocker.lock(key)) { + var read = objectStorage.readObject(key).orElse(null); + if (read == null) throw new IllegalArgumentException("Object not found"); + + var got = objectSerializer.deserialize(read); + + if (type.isInstance(got)) { + var wrapper = new JDataWrapper((T) got); + var old = _objects.putIfAbsent(key, wrapper); + if (old != null) continue; + return Pair.of(type.cast(got), wrapper); + } else if (got == null) { + return null; + } else { + throw new IllegalArgumentException("Object type 
mismatch"); + } + } + } } -} + + private final TransactionObjectSource _objSource = new TransactionObjectSource() { + @Override + public Optional> get(Class type, JObjectKey key) { + var got = JObjectManager.this.get(type, key); + if (got == null) return Optional.empty(); + return Optional.of(new TransactionObject<>() { + @Override + public T get() { + return got.getLeft(); + } + + @Override + public ReadWriteLock getLock() { + return got.getRight().lock; + } + }); + + } + }; + + public TransactionPrivate createTransaction() { + return transactionFactory.createTransaction(_txCounter.getAndIncrement(), _objSource); + } + + + public void commit(TransactionPrivate tx) { + var toUnlock = new LinkedList(); + var toFlush = new LinkedList>(); + var toLock = new ArrayList>(); + + try { + for (var entry : tx.drain()) { + switch (entry) { + case TxRecord.TxObjectRecordRead read -> { + toUnlock.add(read.original().getLock().readLock()::unlock); + } + case TxRecord.TxObjectRecordCopyLock copy -> { + toUnlock.add(copy.original().getLock().writeLock()::unlock); + if (copy.copy().isModified()) { + toFlush.add(copy); + } + } + case TxRecord.TxObjectRecordCopyNoLock copy -> { + if (copy.copy().isModified()) { + toLock.add(copy); + toFlush.add(copy); + } + } + case TxRecord.TxObjectRecordNew created -> { + toFlush.add(created); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); + } + } + + for (var record : toLock) { + var found = _objects.get(record.original().getKey()); + + if (found.get() != record.original()) { + throw new IllegalStateException("Object changed during transaction"); + } + + found.lock.writeLock().lock(); + toUnlock.add(found.lock.writeLock()::unlock); + } + + for (var record : toFlush) { + var current = _objects.get(record.copy().wrapped().getKey()); + + + assert current == null && record instanceof TxRecord.TxObjectRecordNew || current == record.copy().wrapped(); + + if (current.get() != ) + + } + + } catch (Throwable t) { + Log.error("Error when committing transaction", t); + throw t; + } finally { + for (var unlock : toUnlock) { + unlock.apply(); + } + } + } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java deleted file mode 100644 index 4538a6aa..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockWrapper.java +++ /dev/null @@ -1,60 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import java.util.concurrent.locks.ReentrantReadWriteLock; - -public class LockWrapper { - private final JData _data; - private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); - - public LockWrapper(T data) { - _data = data; - } - - public boolean sameObject(JData data) { - return _data == data; - } - - interface DataAccessor extends AutoCloseable { - T getData(); - } - - public class ReadLocked implements DataAccessor { - public ReadLocked() { - _lock.readLock().lock(); - } - - @Override - public void close() { - _lock.readLock().unlock(); - } - - @Override - public B getData() { - return (B) _data; - } - } - - public ReadLocked read() { - return new ReadLocked<>(); - } - - public class WriteLocked implements DataAccessor { - public WriteLocked() { - _lock.writeLock().lock(); - } - - @Override - public void close() { - _lock.writeLock().unlock(); - } - - @Override - public B getData() { - return (B) _data; - } - } - - public WriteLocked write() { - return new WriteLocked<>(); - } -} diff 
--git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java new file mode 100644 index 00000000..abc04ca3 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java @@ -0,0 +1,16 @@ +package com.usatiuk.dhfs.objects; + +public interface ObjectAllocator { + T create(Class type, JObjectKey key); + + interface ChangeTrackingJData { + T wrapped(); + + boolean isModified(); + } + + // A copy of data that can be modified without affecting the original, and that can track changes + ChangeTrackingJData copy(T obj); + + T unmodifiable(T obj); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java new file mode 100644 index 00000000..103d0611 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import com.google.protobuf.ByteString; + +public interface ObjectSerializer { + ByteString serialize(T obj); + + T deserialize(ByteString data); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java new file mode 100644 index 00000000..cbbf59ae --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -0,0 +1,13 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; + +public interface TransactionManager { + void begin(); + + void commit(); + + void rollback(); + + Transaction current(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java new file mode 100644 index 00000000..3c29beba --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -0,0 +1,45 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class TransactionManagerImpl implements TransactionManager { + @Inject + JObjectManager objectManager; + + private static final ThreadLocal _currentTransaction = new ThreadLocal<>(); + + @Override + public void begin() { + if (_currentTransaction.get() != null) { + throw new IllegalStateException("Transaction already started"); + } + + var tx = objectManager.createTransaction(); + _currentTransaction.set(tx); + } + + @Override + public void commit() { + if(_currentTransaction.get() == null) { + throw new IllegalStateException("No transaction started"); + } + + jobjectManager.commit(_currentTransaction.get()); + } + + @Override + public void rollback() { + + } + + @Override + public Transaction current() { + return _currentTransaction.get(); + } + +} + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java new file mode 100644 index 00000000..e64617a1 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +public 
interface TxBundle { + long getId(); + + void commit(JData obj); + + void delete(JData obj); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java new file mode 100644 index 00000000..38ca45f4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.utils.VoidFn; + +public interface TxWriteback { + TxBundle createBundle(); + + void commitBundle(TxBundle bundle); + + void dropBundle(TxBundle bundle); + + void fence(long bundleId); + + // Executes callback after bundle with bundleId id has been persisted + // if it was already, runs callback on the caller thread + void asyncFence(long bundleId, VoidFn callback); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java new file mode 100644 index 00000000..28ecf30e --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -0,0 +1,415 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; +import com.usatiuk.dhfs.utils.VoidFn; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import lombok.Getter; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@ApplicationScoped +public class TxWritebackImpl implements TxWriteback { + private final LinkedList _pendingBundles = new LinkedList<>(); + private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); + + private final Object _flushWaitSynchronizer = new Object(); + private final AtomicLong _lastWrittenTx = new AtomicLong(-1); + private final AtomicLong _counter = new AtomicLong(); + private final AtomicLong _waitedTotal = new AtomicLong(0); + @Inject + ObjectPersistentStore objectPersistentStore; + @ConfigProperty(name = "dhfs.objects.writeback.limit") + long sizeLimit; + private long currentSize = 0; + private ExecutorService _writebackExecutor; + private ExecutorService _commitExecutor; + private ExecutorService _statusExecutor; + private volatile boolean _ready = false; + + void init(@Observes @Priority(110) StartupEvent event) { + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("tx-writeback-%d") + .build(); + + _writebackExecutor = Executors.newSingleThreadExecutor(factory); + _writebackExecutor.submit(this::writeback); + } + + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("writeback-commit-%d") + .build(); + + _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); + } + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + 
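+                        // Status thread: once a second, log how much writeback data is still
+                        // queued (currentSize), so a stalled flush stays visible in the logs.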
Thread.sleep(1000); + if (currentSize > 0) + Log.info("Tx commit status: size=" + + currentSize / 1024 / 1024 + "MB"); + } + } catch (InterruptedException ignored) { + } + }); + _ready = true; + } + + void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { + Log.info("Waiting for all transactions to drain"); + + synchronized (_flushWaitSynchronizer) { + _ready = false; + while (currentSize > 0) { + _flushWaitSynchronizer.wait(); + } + } + + _writebackExecutor.shutdownNow(); + Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); + } + + private void writeback() { + while (!Thread.interrupted()) { + try { + TxBundle bundle = new TxBundle(0); + synchronized (_pendingBundles) { + while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) + _pendingBundles.wait(); + + long diff = 0; + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + bundle.compress(toCompress); + } + diff += bundle.calculateTotalSize(); + synchronized (_flushWaitSynchronizer) { + currentSize += diff; + } + } + + var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size()); + ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); + + for (var c : bundle._committed.values()) { + _commitExecutor.execute(() -> { + try { + Log.trace("Writing new " + c.newMeta.getName()); + objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData); + } catch (Throwable t) { + Log.error("Error writing " + c.newMeta.getName(), t); + errors.add(t); + } finally { + latch.countDown(); + } + }); + } + for (var c : bundle._meta.values()) { + _commitExecutor.execute(() -> { + try { + Log.trace("Writing (meta) " + c.newMeta.getName()); + objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta); + } catch (Throwable t) { + Log.error("Error writing " + c.newMeta.getName(), t); + errors.add(t); + } finally { + latch.countDown(); + } + }); + } + if (Log.isDebugEnabled()) + for (var d : bundle._deleted.keySet()) + Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests + + latch.await(); + if (!errors.isEmpty()) { + throw new RuntimeException("Errors in writeback!"); + } + objectPersistentStore.commitTx( + new TxManifest( + Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()), + bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)), + bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new)) + )); + Log.trace("Bundle " + bundle.getId() + " committed"); + + + List> callbacks = new ArrayList<>(); + synchronized (_notFlushedBundles) { + _lastWrittenTx.set(bundle.getId()); + while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { + callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); + } + } + callbacks.forEach(l -> l.forEach(VoidFn::apply)); + + synchronized (_flushWaitSynchronizer) { + currentSize -= ((TxBundle) bundle).calculateTotalSize(); + // FIXME: + if (currentSize <= sizeLimit || !_ready) + _flushWaitSynchronizer.notifyAll(); + } + } catch (InterruptedException ignored) { + } catch (Exception e) { + Log.error("Uncaught exception in writeback", e); + } 
catch (Throwable o) { + Log.error("Uncaught THROWABLE in writeback", o); + } + } + Log.info("Writeback thread exiting"); + } + + @Override + public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() { + verifyReady(); + boolean wait = false; + while (true) { + if (wait) { + synchronized (_flushWaitSynchronizer) { + long started = System.currentTimeMillis(); + while (currentSize > sizeLimit) { + try { + _flushWaitSynchronizer.wait(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + long waited = System.currentTimeMillis() - started; + _waitedTotal.addAndGet(waited); + if (Log.isTraceEnabled()) + Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); + wait = false; + } + } + synchronized (_pendingBundles) { + synchronized (_flushWaitSynchronizer) { + if (currentSize > sizeLimit) { + if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var target = _pendingBundles.poll(); + + long diff = -target.calculateTotalSize(); + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + target.compress(toCompress); + } + diff += target.calculateTotalSize(); + currentSize += diff; + _pendingBundles.addFirst(target); + } + } + + if (currentSize > sizeLimit) { + wait = true; + continue; + } + } + synchronized (_notFlushedBundles) { + var bundle = new TxBundle(_counter.incrementAndGet()); + _pendingBundles.addLast(bundle); + _notFlushedBundles.put(bundle.getId(), bundle); + return bundle; + } + } + } + } + + @Override + public void commitBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { + verifyReady(); + synchronized (_pendingBundles) { + ((TxBundle) bundle).setReady(); + if (_pendingBundles.peek() == bundle) + _pendingBundles.notify(); + synchronized (_flushWaitSynchronizer) { + currentSize += ((TxBundle) bundle).calculateTotalSize(); + } + } + } + + @Override + public void dropBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { + verifyReady(); + synchronized (_pendingBundles) { + Log.warn("Dropped bundle: " + bundle); + _pendingBundles.remove((TxBundle) bundle); + synchronized (_flushWaitSynchronizer) { + currentSize -= ((TxBundle) bundle).calculateTotalSize(); + } + } + } + + @Override + public void fence(long bundleId) { + var latch = new CountDownLatch(1); + asyncFence(bundleId, latch::countDown); + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void asyncFence(long bundleId, VoidFn fn) { + verifyReady(); + if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); + if (_lastWrittenTx.get() >= bundleId) { + fn.apply(); + return; + } + synchronized (_notFlushedBundles) { + if (_lastWrittenTx.get() >= bundleId) { + fn.apply(); + return; + } + _notFlushedBundles.get(bundleId).addCallback(fn); + } + } + + @Getter + private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest { + private final ArrayList _written; + private final ArrayList _deleted; + + private TxManifest(ArrayList written, ArrayList deleted) { + _written = written; + _deleted = deleted; + } + } + + private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle { + private final HashMap, CommittedEntry> _committed = new HashMap<>(); + private final HashMap, CommittedMeta> _meta = new HashMap<>(); + private final HashMap, Integer> _deleted = new HashMap<>(); + private final ArrayList 
_callbacks = new ArrayList<>(); + private long _txId; + @Getter + private volatile boolean _ready = false; + private long _size = -1; + private boolean _wasCommitted = false; + + private TxBundle(long txId) {_txId = txId;} + + @Override + public long getId() { + return _txId; + } + + public void setReady() { + _ready = true; + } + + public void addCallback(VoidFn callback) { + synchronized (_callbacks) { + if (_wasCommitted) throw new IllegalStateException(); + _callbacks.add(callback); + } + } + + public List setCommitted() { + synchronized (_callbacks) { + _wasCommitted = true; + return Collections.unmodifiableList(_callbacks); + } + } + + @Override + public void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data) { + synchronized (_committed) { + _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize())); + } + } + + @Override + public void commitMetaChange(JObject obj, ObjectMetadataP meta) { + synchronized (_meta) { + _meta.put(obj, new CommittedMeta(meta, obj.estimateSize())); + } + } + + @Override + public void delete(JObject obj) { + synchronized (_deleted) { + _deleted.put(obj, obj.estimateSize()); + } + } + + + public long calculateTotalSize() { + if (_size >= 0) return _size; + long out = 0; + for (var c : _committed.values()) + out += c.size; + for (var c : _meta.values()) + out += c.size; + for (var c : _deleted.entrySet()) + out += c.getValue(); + _size = out; + return _size; + } + + public void compress(TxBundle other) { + if (_txId >= other._txId) + throw new IllegalArgumentException("Compressing an older bundle into newer"); + + _txId = other._txId; + _size = -1; + + for (var d : other._deleted.entrySet()) { + _committed.remove(d.getKey()); + _meta.remove(d.getKey()); + _deleted.put(d.getKey(), d.getValue()); + } + + for (var c : other._committed.entrySet()) { + _committed.put(c.getKey(), c.getValue()); + _meta.remove(c.getKey()); + _deleted.remove(c.getKey()); + } + + for (var m : other._meta.entrySet()) { + var deleted = _deleted.remove(m.getKey()); + if (deleted != null) { + _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize())); + continue; + } + var committed = _committed.remove(m.getKey()); + if (committed != null) { + _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize())); + continue; + } + _meta.put(m.getKey(), m.getValue()); + } + } + + private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {} + + private record CommittedMeta(ObjectMetadataP newMeta, int size) {} + + private record Deleted(JObject handle) {} + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java similarity index 88% rename from dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index 08b9decd..b88b3cac 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingFileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; +import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; -import 
com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.dhfs.utils.ByteUtils; @@ -43,14 +43,14 @@ import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; // rest of metadata @ApplicationScoped -public class SerializingFileObjectPersistentStore implements ObjectPersistentStore { +public class FileObjectPersistentStore implements ObjectPersistentStore { private final Path _root; private final Path _txManifest; private ExecutorService _flushExecutor; private RandomAccessFile _txFile; private volatile boolean _ready = false; - public SerializingFileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { + public FileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { this._root = Path.of(root).resolve("objects"); _txManifest = Path.of(root).resolve("cur-tx-manifest"); } @@ -135,7 +135,7 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto @Nonnull @Override - public Optional readObject(JObjectKey name) { + public Optional readObject(JObjectKey name) { verifyReady(); var path = getObjPath(name); try (var rf = new RandomAccessFile(path.toFile(), "r")) { @@ -146,10 +146,9 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto var bs = UnsafeByteOperations.unsafeWrap(buf); // This way, the input will be considered "immutable" which would allow avoiding copies // when parsing byte arrays - var ch = bs.newCodedInput(); - ch.enableAliasing(true); -// return JObjectDataP.parseFrom(ch); - return null; +// var ch = bs.newCodedInput(); +// ch.enableAliasing(true); + return Optional.of(bs); } catch (EOFException | FileNotFoundException | NoSuchFileException fx) { return Optional.empty(); } catch (IOException e) { @@ -169,13 +168,9 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto throw new EOFException(); } - private void writeObjectImpl(Path path, JData data, boolean sync) throws IOException { + private void writeObjectImpl(Path path, ByteString data, boolean sync) throws IOException { try (var fsb = new FileOutputStream(path.toFile(), false)) { -// int dataSize = data.getSerializedSize(); - int dataSize = 0; - -// if (fsb.getChannel().write(metaBb.limit(META_BLOCK_SIZE)) != META_BLOCK_SIZE) -// throw new IOException("Could not write to file"); + data.writeTo(fsb); if (sync) { fsb.flush(); @@ -185,19 +180,7 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto } @Override - public void writeObjectDirect(JObjectKey name, JData data) { - verifyReady(); - try { - var path = getObjPath(name); - writeObjectImpl(path, data, false); - } catch (IOException e) { - Log.error("Error writing file " + name, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Override - public void writeObject(JObjectKey name, JData obj) { + public void writeObject(JObjectKey name, ByteString obj) { verifyReady(); try { var tmpPath = getTmpObjPath(name); @@ -207,7 +190,6 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto } } - private TxManifest readTxManifest() { try { var channel = _txFile.getChannel(); @@ -324,12 +306,6 @@ public class SerializingFileObjectPersistentStore implements ObjectPersistentSto } } - @Override - public void deleteObjectDirect(JObjectKey name) { - verifyReady(); - deleteImpl(getObjPath(name)); - } - @Override public long 
getTotalSpace() { verifyReady(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index e03af1a4..79750a69 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; +import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; @@ -7,24 +8,19 @@ import javax.annotation.Nonnull; import java.util.Collection; import java.util.Optional; +// Persistent storage of objects +// All changes are written as sequential transactions public interface ObjectPersistentStore { @Nonnull Collection findAllObjects(); @Nonnull - Optional readObject(JObjectKey name); - - void writeObjectDirect(JObjectKey name, JData object); - - void writeObject(JObjectKey name, JData object); - + Optional readObject(JObjectKey name); + void writeObject(JObjectKey name, ByteString object); void commitTx(TxManifest names); - // Deletes object metadata and data - void deleteObjectDirect(JObjectKey name); - long getTotalSpace(); long getFreeSpace(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java new file mode 100644 index 00000000..f3fb25e4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.transaction; + +public enum LockingStrategy { + READ_ONLY, // Read only, no writes allowed, blocks writers + OPTIMISTIC, // Optimistic write, no blocking other possible writers + WRITE // Write lock, blocks all other writers +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java new file mode 100644 index 00000000..f9220a7d --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +import java.util.Optional; + +// The transaction interface actually used by user code to retrieve objects +public interface Transaction { + Optional getObject(Class type, JObjectKey key, LockingStrategy strategy); + + void putObject(JData obj); + + default Optional getObject(Class type, JObjectKey key) { + return getObject(type, key, LockingStrategy.READ_ONLY); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java new file mode 100644 index 00000000..eea5cfc5 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects.transaction; + +public interface TransactionFactory { + TransactionPrivate createTransaction(long id, TransactionObjectSource source); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java new file mode 100644 index 00000000..44367977 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -0,0 +1,96 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.ObjectAllocator; +import jakarta.inject.Inject; +import lombok.AccessLevel; +import lombok.Getter; + +import java.util.*; + +public class TransactionFactoryImpl implements TransactionFactory { + @Inject + ObjectAllocator objectAllocator; + + private class TransactionImpl implements TransactionPrivate { + @Getter(AccessLevel.PUBLIC) + private final long _id; + private final TransactionObjectSource _source; + + private final Map> _objects = new HashMap<>(); + + private TransactionImpl(long id, TransactionObjectSource source) { + _id = id; + _source = source; + } + + @Override + public Optional getObject(Class type, JObjectKey key, LockingStrategy strategy) { + var got = _objects.get(key); + if (got != null) { + var compatible = got.getIfStrategyCompatible(key, strategy); + if (compatible == null) { + throw new IllegalArgumentException("Locking strategy mismatch"); + } + if (!type.isInstance(compatible)) { + throw new IllegalArgumentException("Object type mismatch"); + } + return Optional.of(type.cast(compatible)); + } + + var read = _source.get(type, key).orElse(null); + + if (read == null) { + return Optional.empty(); + } + + switch (strategy) { + case READ_ONLY: { + read.getLock().readLock().lock(); + var view = objectAllocator.unmodifiable(read.get()); + _objects.put(key, new TxRecord.TxObjectRecordRead<>(read, view)); + return Optional.of(view); + } + case WRITE: + case OPTIMISTIC: { + var copy = objectAllocator.copy(read.get()); + + switch (strategy) { + case WRITE: + read.getLock().writeLock().lock(); + _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(read, copy)); + break; + case OPTIMISTIC: + _objects.put(key, new TxRecord.TxObjectRecordCopyNoLock<>(read.get(), copy)); + break; + } + + return Optional.of(copy.wrapped()); + } + default: + throw new IllegalArgumentException("Unknown locking strategy"); + } + } + + @Override + public void putObject(JData obj) { + if (_objects.containsKey(obj.getKey())) { + throw new IllegalArgumentException("Object already exists in transaction"); + } + + _objects.put(obj.getKey(), new TxRecord.TxObjectRecordNew<>(obj)); + } + + @Override + public Collection> drain() { + return Collections.unmodifiableCollection(_objects.values()); + } + } + + @Override + public TransactionPrivate createTransaction(long id, TransactionObjectSource source) { + return new TransactionImpl(id, source); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java new file mode 100644 index 00000000..55301c9c --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +import java.util.Optional; +import java.util.concurrent.locks.ReadWriteLock; + +public interface TransactionObjectSource { + interface TransactionObject { + T get(); + + ReadWriteLock 
getLock();
+    }
+
+    Optional> get(Class type, JObjectKey key);
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java
new file mode 100644
index 00000000..2d3300bc
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java
@@ -0,0 +1,9 @@
+package com.usatiuk.dhfs.objects.transaction;
+
+import java.util.Collection;
+
+// The manager-facing side of a transaction: exposes its id and the recorded object accesses for commit
+public interface TransactionPrivate extends Transaction {
+    long getId();
+
+    Collection> drain();
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java
new file mode 100644
index 00000000..b5b3250d
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java
@@ -0,0 +1,73 @@
+package com.usatiuk.dhfs.objects.transaction;
+
+import com.usatiuk.dhfs.objects.JData;
+import com.usatiuk.dhfs.objects.JObjectKey;
+import com.usatiuk.dhfs.objects.ObjectAllocator;
+
+public class TxRecord {
+    public interface TxObjectRecord {
+        T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy);
+    }
+
+    public interface TxObjectRecordWrite extends TxObjectRecord {
+        ObjectAllocator.ChangeTrackingJData copy();
+    }
+
+    public record TxObjectRecordRead(TransactionObjectSource.TransactionObject original,
+                                     T copy)
+            implements TxObjectRecord {
+        @Override
+        public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) {
+            if (strategy == LockingStrategy.READ_ONLY)
+                return copy;
+            return null;
+        }
+    }
+
+    public record TxObjectRecordNew(T created)
+            implements TxObjectRecordWrite {
+        @Override
+        public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) {
+            if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC)
+                return created;
+            return null;
+        }
+
+        @Override
+        public ObjectAllocator.ChangeTrackingJData copy() {
+            return new ObjectAllocator.ChangeTrackingJData() {
+                @Override
+                public T wrapped() {
+                    return created;
+                }
+
+                @Override
+                public boolean isModified() {
+                    return false;
+                }
+            };
+        }
+    }
+
+    public record TxObjectRecordCopyLock(TransactionObjectSource.TransactionObject original,
+                                         ObjectAllocator.ChangeTrackingJData copy)
+            implements TxObjectRecordWrite {
+        @Override
+        public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) {
+            if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC)
+                return copy.wrapped();
+            return null;
+        }
+    }
+
+    public record TxObjectRecordCopyNoLock(T original,
+                                           ObjectAllocator.ChangeTrackingJData copy)
+            implements TxObjectRecordWrite {
+        @Override
+        public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) {
+            if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC)
+                return copy.wrapped();
+            return null;
+        }
+    }
+}
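
For illustration, here is how the three locking strategies surface through the Transaction API when reading one object (a sketch, with assumed names: tx is a Transaction, key a JObjectKey, and Parent stands for some JData implementation). Per TransactionFactoryImpl above, READ_ONLY yields an unmodifiable view under a read lock, WRITE a change-tracking copy under a write lock, and OPTIMISTIC an unlocked copy whose conflicts are detected at commit:

    Optional<Parent> ro  = tx.getObject(Parent.class, key);                             // READ_ONLY default: unmodifiable view, read-locked
    Optional<Parent> w   = tx.getObject(Parent.class, key, LockingStrategy.WRITE);      // change-tracking copy, write-locked until commit
    Optional<Parent> opt = tx.getObject(Parent.class, key, LockingStrategy.OPTIMISTIC); // unlocked copy; conflicts surface at commit time
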
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java
index 5566f1f5..90c5c7c1 100644
--- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java
@@ -30,13 +30,6 @@ public class FakeObjectStorage implements ObjectPersistentStore {
         }
     }
 
-    @Override
-    public void writeObjectDirect(JObjectKey name, JData object) {
-        synchronized (this) {
-            _objects.put(name, (TestData) object);
-        }
-    }
-
     @Override
     public void writeObject(JObjectKey name, JData object) {
         synchronized (this) {
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
index d5fb404d..b5696f25 100644
--- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
@@ -2,12 +2,12 @@
 package com.usatiuk.dhfs.objects.test.objs;
 
 import com.usatiuk.dhfs.objects.JData;
 import com.usatiuk.dhfs.objects.JObject;
-import com.usatiuk.dhfs.objects.JObjectInterface;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 
 public class Kid extends JObject {
 
-    public Kid(JObjectInterface jObjectInterface, KidData data) {
-        super(jObjectInterface, data);
+    public Kid(Transaction transaction, KidData data) {
+        super(transaction, data);
     }
 
     @Override
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
index e519803e..bf49e1e7 100644
--- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
@@ -2,7 +2,7 @@
 package com.usatiuk.dhfs.objects.test.objs;
 
 import com.usatiuk.dhfs.objects.JData;
 import com.usatiuk.dhfs.objects.JObject;
-import com.usatiuk.dhfs.objects.JObjectInterface;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 
 import java.util.function.Function;
 
@@ -13,7 +13,7 @@
 public interface KidData extends JData {
 
     KidData bindCopy();
 
-    default Function binder() {
+    default Function binder(boolean isLocked) {
         return jo -> new Kid(jo, bindCopy());
     }
 }
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java
index 176c983f..7b90597e 100644
--- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java
@@ -2,15 +2,15 @@
 package com.usatiuk.dhfs.objects.test.objs;
 
 import com.usatiuk.dhfs.objects.JData;
 import com.usatiuk.dhfs.objects.JObject;
-import com.usatiuk.dhfs.objects.JObjectInterface;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 import lombok.experimental.Delegate;
 
 public class Parent extends JObject {
     @Delegate
     private final ParentData _data;
 
-    public Parent(JObjectInterface jObjectInterface, ParentData data) {
-        super(jObjectInterface);
+    public Parent(Transaction transaction, ParentData data) {
+        super(transaction);
         _data = data;
     }
diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java
new file mode 100644
index 00000000..ecb1288a
--- /dev/null
+++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java
@@ -0,0 +1,53 @@
+package com.usatiuk.dhfs.utils;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+public class DataLocker {
+    private static class LockTag {
+        boolean released = false;
+    }
+
+    private final ConcurrentHashMap _locks = new ConcurrentHashMap<>();
+
+    public class Lock implements AutoCloseable {
+        private final Object _key;
+        private final LockTag _tag;
+
+        public Lock(Object key, LockTag tag) {
+            _key = key;
+            _tag = tag;
+        }
+
+        @Override
+        public void close() {
+            synchronized (_tag) {
+                _tag.released = true;
+                _tag.notifyAll();
+                _locks.remove(_key, _tag);
+            }
+        }
+    }
+
+    public Lock lock(Object data) {
+        while (true) {
+            try {
+                var tag = _locks.get(data);
+                if (tag != null) {
+                    synchronized (tag) {
+                        if (!tag.released)
+                            tag.wait();
+                        continue;
+                    }
+                }
+            } catch (InterruptedException ignored) {
+            }
+
+            var newTag = new LockTag();
+            var oldTag = _locks.putIfAbsent(data, newTag);
+            if (oldTag == null) {
+                return new Lock(data, newTag);
+            }
+        }
+    }
+
+}
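
Since the returned Lock is AutoCloseable, the intended use of the DataLocker above is try-with-resources (a sketch; locker and key are hypothetical names):

    var locker = new DataLocker();
    try (var lock = locker.lock(key)) {
        // exclusive section for this key
    }
    // close() marks the tag released, wakes waiters, and removes it from the map,
    // so a waiting contender's retry loop in lock() can install a fresh tag
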
From 7bb509024fdbcec1bd1ba685682b2da92fc8363f Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Mon, 2 Dec 2024 22:08:48 +0100
Subject: [PATCH 003/105] simple transactions 1

---
 .../dhfs/objects/CurrentTransaction.java      |  24 +
 .../usatiuk/dhfs/objects/JObjectManager.java  |  72 +-
 .../dhfs/objects/TransactionManagerImpl.java  |  21 +-
 .../usatiuk/dhfs/objects/TxWritebackImpl.java | 830 +++++++++---------
 .../FileObjectPersistentStore.java            |   3 +
 .../MemoryObjectPersistentStore.java}         |  27 +-
 .../transaction/TransactionFactoryImpl.java   |   2 +
 .../com/usatiuk/dhfs/objects/ObjectsTest.java | 199 +++--
 .../objects/allocator/ChangeTrackerBase.java  |  14 +
 .../dhfs/objects/allocator/KidDataCT.java     |  33 +
 .../dhfs/objects/allocator/KidDataNormal.java |  23 +
 .../dhfs/objects/allocator/ParentDataCT.java  |  40 +
 .../objects/allocator/ParentDataNormal.java   |  29 +
 .../{test/objs => allocator}/TestData.java    |   2 +-
 .../allocator/TestObjectAllocator.java        |  40 +
 .../com/usatiuk/dhfs/objects/data/Kid.java    |   9 +
 .../com/usatiuk/dhfs/objects/data/Parent.java |  15 +
 .../serializer/TestJDataSerializer.java       |  23 +
 .../usatiuk/dhfs/objects/test/objs/Kid.java   |  18 -
 .../dhfs/objects/test/objs/KidData.java       |  19 -
 .../dhfs/objects/test/objs/KidDataImpl.java   |  28 -
 .../dhfs/objects/test/objs/Parent.java        |  25 -
 .../dhfs/objects/test/objs/ParentData.java    |  14 -
 .../objects/test/objs/ParentDataImpl.java     |  41 -
 .../src/test/resources/application.properties |   1 +
 25 files changed, 890 insertions(+), 662 deletions(-)
 create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java
 rename dhfs-parent/objects/src/{test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java => main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java} (63%)
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java
 rename dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/{test/objs => allocator}/TestData.java (93%)
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java
 create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java
 delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java
 create mode 100644 dhfs-parent/objects/src/test/resources/application.properties
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java
new file mode 100644
index 00000000..fe309206
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java
@@ -0,0 +1,24 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+import java.util.Optional;
+
+@ApplicationScoped
+public class CurrentTransaction implements Transaction {
+    @Inject
+    TransactionManager transactionManager;
+
+    @Override
+    public Optional getObject(Class type, JObjectKey key, LockingStrategy strategy) {
+        return transactionManager.current().getObject(type, key, strategy);
+    }
+
+    @Override
+    public void putObject(JData obj) {
+        transactionManager.current().putObject(obj);
+    }
+}
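
For orientation, a minimal end-to-end use of these pieces might look as follows (a sketch with assumed names: txm is an injected TransactionManager, curTx the CurrentTransaction facade above, Parent stands for some JData implementation, and JObjectKey is assumed to wrap a string name):

    txm.begin();
    try {
        var parent = curTx.getObject(Parent.class, new JObjectKey("parent1"), LockingStrategy.WRITE);
        parent.ifPresent(p -> { /* mutate the write-locked change-tracking copy */ });
        txm.commit(); // JObjectManager validates the canonical instances, then persists
    } catch (Throwable t) {
        txm.rollback();
        throw t;
    }
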
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index d5c1aca6..c2485f1a 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -1,6 +1,7 @@
 package com.usatiuk.dhfs.objects;
 
 import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore;
+import com.usatiuk.dhfs.objects.persistence.TxManifest;
 import com.usatiuk.dhfs.objects.transaction.TransactionFactory;
 import com.usatiuk.dhfs.objects.transaction.TransactionObjectSource;
 import com.usatiuk.dhfs.objects.transaction.TransactionPrivate;
@@ -14,9 +15,7 @@
 import org.apache.commons.lang3.tuple.Pair;
 
 import java.lang.ref.Cleaner;
 import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.Optional;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -114,7 +113,9 @@ public class JObjectManager {
     };
 
     public TransactionPrivate createTransaction() {
-        return transactionFactory.createTransaction(_txCounter.getAndIncrement(), _objSource);
+        var counter = _txCounter.getAndIncrement();
+        Log.trace("Creating transaction " + counter);
+        return transactionFactory.createTransaction(counter, _objSource);
     }
 
@@ -123,8 +124,11 @@
         var toFlush = new LinkedList>();
         var toLock = new ArrayList>();
 
+        Log.trace("Committing transaction " + tx.getId());
+
         try {
             for (var entry : tx.drain()) {
+                Log.trace("Processing entry " + entry.toString());
                 switch (entry) {
                     case TxRecord.TxObjectRecordRead read -> {
                         toUnlock.add(read.original().getLock().readLock()::unlock);
@@ -148,7 +152,11 @@
                 }
             }
 
+            toLock.sort(Comparator.comparingInt(a -> System.identityHashCode(a.original())));
+
             for (var record : toLock) {
+                Log.trace("Locking " + record.toString());
+
                 var found = _objects.get(record.original().getKey());
 
                 if (found.get() != record.original()) {
@@ -160,15 +168,65 @@
             }
 
             for (var record : toFlush) {
+                Log.trace("Processing flush entry " + record.toString());
+
                 var current = _objects.get(record.copy().wrapped().getKey());
 
+                if (current == null && !(record instanceof TxRecord.TxObjectRecordNew)) {
+                    throw new IllegalStateException("Object not found during transaction");
+                } else if (current != null) {
+                    var old = switch (record) {
+                        case TxRecord.TxObjectRecordCopyLock copy -> copy.original().get();
+                        case TxRecord.TxObjectRecordCopyNoLock copy -> copy.original();
+                        default -> throw new IllegalStateException("Unexpected value: " + record);
+                    };
 
-                assert current == null && record instanceof TxRecord.TxObjectRecordNew || current == record.copy().wrapped();
-
-                if (current.get() != )
+                    if (current.get() != old) {
+                        throw new IllegalStateException("Object changed during transaction");
+                    }
 
+                    if (current.lastWriteTx > tx.getId()) {
+                        throw new IllegalStateException("Transaction race");
+                    }
+                } else if (record instanceof TxRecord.TxObjectRecordNew created) {
+                    var wrapper = new JDataWrapper<>(created.created());
+                    wrapper.lock.writeLock().lock();
+                    var old = _objects.putIfAbsent(created.created().getKey(), wrapper);
+                    if (old != null)
+                        throw new IllegalStateException("Object already exists");
+                    toUnlock.add(wrapper.lock.writeLock()::unlock);
+                } else {
+                    throw new IllegalStateException("Object not found during transaction");
+                }
             }
 
+            // Have all locks now
+            for (var record : toFlush) {
+                Log.trace("Flushing " + record.toString());
+
+                if (!record.copy().isModified())
+                    continue;
+
+                var obj = record.copy().wrapped();
+                var key = obj.getKey();
+                var data = objectSerializer.serialize(obj);
+                objectStorage.writeObject(key, data);
+                _objects.get(key).lastWriteTx = tx.getId(); // FIXME:
+            }
+
+            Log.trace("Flushing transaction " + tx.getId());
+
+            objectStorage.commitTx(new TxManifest() {
+                @Override
+                public List getWritten() {
+                    return toFlush.stream().map(r -> r.copy().wrapped().getKey()).toList();
+                }
+
+                @Override
+                public List getDeleted() {
+                    return List.of();
+                }
+            });
         } catch (Throwable t) {
             Log.error("Error when committing transaction", t);
             throw t;
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java
index 3c29beba..f4944aa0 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java
@@ -2,13 +2,14 @@
 package com.usatiuk.dhfs.objects;
 
 import com.usatiuk.dhfs.objects.transaction.Transaction;
 import com.usatiuk.dhfs.objects.transaction.TransactionPrivate;
+import io.quarkus.logging.Log;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
 
 @ApplicationScoped
 public class TransactionManagerImpl implements TransactionManager {
     @Inject
-    JObjectManager objectManager;
+    JObjectManager jObjectManager;
 
     private static final ThreadLocal _currentTransaction = new ThreadLocal<>();
 
 @@
-18,28 +19,36 @@ public class TransactionManagerImpl implements TransactionManager { throw new IllegalStateException("Transaction already started"); } - var tx = objectManager.createTransaction(); + Log.trace("Starting transaction"); + var tx = jObjectManager.createTransaction(); _currentTransaction.set(tx); } @Override public void commit() { - if(_currentTransaction.get() == null) { + if (_currentTransaction.get() == null) { throw new IllegalStateException("No transaction started"); } - jobjectManager.commit(_currentTransaction.get()); + Log.trace("Committing transaction"); + try { + jObjectManager.commit(_currentTransaction.get()); + } catch (Throwable e) { + Log.warn("Transaction commit failed", e); + throw e; + } finally { + _currentTransaction.remove(); + } } @Override public void rollback() { - + _currentTransaction.remove(); } @Override public Transaction current() { return _currentTransaction.get(); } - } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 28ecf30e..92705e43 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -1,415 +1,415 @@ -package com.usatiuk.dhfs.objects; - -import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; -import com.usatiuk.dhfs.utils.VoidFn; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import lombok.Getter; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -@ApplicationScoped -public class TxWritebackImpl implements TxWriteback { - private final LinkedList _pendingBundles = new LinkedList<>(); - private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); - - private final Object _flushWaitSynchronizer = new Object(); - private final AtomicLong _lastWrittenTx = new AtomicLong(-1); - private final AtomicLong _counter = new AtomicLong(); - private final AtomicLong _waitedTotal = new AtomicLong(0); - @Inject - ObjectPersistentStore objectPersistentStore; - @ConfigProperty(name = "dhfs.objects.writeback.limit") - long sizeLimit; - private long currentSize = 0; - private ExecutorService _writebackExecutor; - private ExecutorService _commitExecutor; - private ExecutorService _statusExecutor; - private volatile boolean _ready = false; - - void init(@Observes @Priority(110) StartupEvent event) { - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("tx-writeback-%d") - .build(); - - _writebackExecutor = Executors.newSingleThreadExecutor(factory); - _writebackExecutor.submit(this::writeback); - } - - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("writeback-commit-%d") - .build(); - - _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } - 
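// The commit() rewritten above wraps the delegate call in try/finally so the
// ThreadLocal is cleared even when the commit throws; otherwise a failed commit
// would leave a stale transaction attached to a pooled worker thread. A minimal
// sketch of that shape (invented names, not this repo's types):

final class ThreadLocalTxShape {
    private static final ThreadLocal<Object> CURRENT = new ThreadLocal<>();

    static void begin(Object tx) {
        if (CURRENT.get() != null) throw new IllegalStateException("Transaction already started");
        CURRENT.set(tx);
    }

    static void commit(Runnable doCommit) {
        if (CURRENT.get() == null) throw new IllegalStateException("No transaction started");
        try {
            doCommit.run();   // may throw; the exception is rethrown as-is
        } finally {
            CURRENT.remove(); // always detach the tx from this thread
        }
    }
}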
_statusExecutor = Executors.newSingleThreadExecutor(); - _statusExecutor.submit(() -> { - try { - while (true) { - Thread.sleep(1000); - if (currentSize > 0) - Log.info("Tx commit status: size=" - + currentSize / 1024 / 1024 + "MB"); - } - } catch (InterruptedException ignored) { - } - }); - _ready = true; - } - - void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { - Log.info("Waiting for all transactions to drain"); - - synchronized (_flushWaitSynchronizer) { - _ready = false; - while (currentSize > 0) { - _flushWaitSynchronizer.wait(); - } - } - - _writebackExecutor.shutdownNow(); - Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); - } - - private void verifyReady() { - if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); - } - - private void writeback() { - while (!Thread.interrupted()) { - try { - TxBundle bundle = new TxBundle(0); - synchronized (_pendingBundles) { - while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) - _pendingBundles.wait(); - - long diff = 0; - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - bundle.compress(toCompress); - } - diff += bundle.calculateTotalSize(); - synchronized (_flushWaitSynchronizer) { - currentSize += diff; - } - } - - var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size()); - ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); - - for (var c : bundle._committed.values()) { - _commitExecutor.execute(() -> { - try { - Log.trace("Writing new " + c.newMeta.getName()); - objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData); - } catch (Throwable t) { - Log.error("Error writing " + c.newMeta.getName(), t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - for (var c : bundle._meta.values()) { - _commitExecutor.execute(() -> { - try { - Log.trace("Writing (meta) " + c.newMeta.getName()); - objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta); - } catch (Throwable t) { - Log.error("Error writing " + c.newMeta.getName(), t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - if (Log.isDebugEnabled()) - for (var d : bundle._deleted.keySet()) - Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests - - latch.await(); - if (!errors.isEmpty()) { - throw new RuntimeException("Errors in writeback!"); - } - objectPersistentStore.commitTx( - new TxManifest( - Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()), - bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)), - bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new)) - )); - Log.trace("Bundle " + bundle.getId() + " committed"); - - - List> callbacks = new ArrayList<>(); - synchronized (_notFlushedBundles) { - _lastWrittenTx.set(bundle.getId()); - while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { - callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); - } - } - callbacks.forEach(l -> l.forEach(VoidFn::apply)); - - synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundle) bundle).calculateTotalSize(); - // FIXME: - if (currentSize <= sizeLimit || !_ready) - _flushWaitSynchronizer.notifyAll(); - } - } catch 
(InterruptedException ignored) { - } catch (Exception e) { - Log.error("Uncaught exception in writeback", e); - } catch (Throwable o) { - Log.error("Uncaught THROWABLE in writeback", o); - } - } - Log.info("Writeback thread exiting"); - } - - @Override - public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() { - verifyReady(); - boolean wait = false; - while (true) { - if (wait) { - synchronized (_flushWaitSynchronizer) { - long started = System.currentTimeMillis(); - while (currentSize > sizeLimit) { - try { - _flushWaitSynchronizer.wait(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - long waited = System.currentTimeMillis() - started; - _waitedTotal.addAndGet(waited); - if (Log.isTraceEnabled()) - Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); - wait = false; - } - } - synchronized (_pendingBundles) { - synchronized (_flushWaitSynchronizer) { - if (currentSize > sizeLimit) { - if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var target = _pendingBundles.poll(); - - long diff = -target.calculateTotalSize(); - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - target.compress(toCompress); - } - diff += target.calculateTotalSize(); - currentSize += diff; - _pendingBundles.addFirst(target); - } - } - - if (currentSize > sizeLimit) { - wait = true; - continue; - } - } - synchronized (_notFlushedBundles) { - var bundle = new TxBundle(_counter.incrementAndGet()); - _pendingBundles.addLast(bundle); - _notFlushedBundles.put(bundle.getId(), bundle); - return bundle; - } - } - } - } - - @Override - public void commitBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { - verifyReady(); - synchronized (_pendingBundles) { - ((TxBundle) bundle).setReady(); - if (_pendingBundles.peek() == bundle) - _pendingBundles.notify(); - synchronized (_flushWaitSynchronizer) { - currentSize += ((TxBundle) bundle).calculateTotalSize(); - } - } - } - - @Override - public void dropBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { - verifyReady(); - synchronized (_pendingBundles) { - Log.warn("Dropped bundle: " + bundle); - _pendingBundles.remove((TxBundle) bundle); - synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundle) bundle).calculateTotalSize(); - } - } - } - - @Override - public void fence(long bundleId) { - var latch = new CountDownLatch(1); - asyncFence(bundleId, latch::countDown); - try { - latch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - @Override - public void asyncFence(long bundleId, VoidFn fn) { - verifyReady(); - if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); - if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); - return; - } - synchronized (_notFlushedBundles) { - if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); - return; - } - _notFlushedBundles.get(bundleId).addCallback(fn); - } - } - - @Getter - private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest { - private final ArrayList _written; - private final ArrayList _deleted; - - private TxManifest(ArrayList written, ArrayList deleted) { - _written = written; - _deleted = deleted; - } - } - - private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle { - private final HashMap, CommittedEntry> _committed = new HashMap<>(); - private final HashMap, CommittedMeta> 
_meta = new HashMap<>(); - private final HashMap, Integer> _deleted = new HashMap<>(); - private final ArrayList _callbacks = new ArrayList<>(); - private long _txId; - @Getter - private volatile boolean _ready = false; - private long _size = -1; - private boolean _wasCommitted = false; - - private TxBundle(long txId) {_txId = txId;} - - @Override - public long getId() { - return _txId; - } - - public void setReady() { - _ready = true; - } - - public void addCallback(VoidFn callback) { - synchronized (_callbacks) { - if (_wasCommitted) throw new IllegalStateException(); - _callbacks.add(callback); - } - } - - public List setCommitted() { - synchronized (_callbacks) { - _wasCommitted = true; - return Collections.unmodifiableList(_callbacks); - } - } - - @Override - public void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data) { - synchronized (_committed) { - _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize())); - } - } - - @Override - public void commitMetaChange(JObject obj, ObjectMetadataP meta) { - synchronized (_meta) { - _meta.put(obj, new CommittedMeta(meta, obj.estimateSize())); - } - } - - @Override - public void delete(JObject obj) { - synchronized (_deleted) { - _deleted.put(obj, obj.estimateSize()); - } - } - - - public long calculateTotalSize() { - if (_size >= 0) return _size; - long out = 0; - for (var c : _committed.values()) - out += c.size; - for (var c : _meta.values()) - out += c.size; - for (var c : _deleted.entrySet()) - out += c.getValue(); - _size = out; - return _size; - } - - public void compress(TxBundle other) { - if (_txId >= other._txId) - throw new IllegalArgumentException("Compressing an older bundle into newer"); - - _txId = other._txId; - _size = -1; - - for (var d : other._deleted.entrySet()) { - _committed.remove(d.getKey()); - _meta.remove(d.getKey()); - _deleted.put(d.getKey(), d.getValue()); - } - - for (var c : other._committed.entrySet()) { - _committed.put(c.getKey(), c.getValue()); - _meta.remove(c.getKey()); - _deleted.remove(c.getKey()); - } - - for (var m : other._meta.entrySet()) { - var deleted = _deleted.remove(m.getKey()); - if (deleted != null) { - _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize())); - continue; - } - var committed = _committed.remove(m.getKey()); - if (committed != null) { - _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize())); - continue; - } - _meta.put(m.getKey(), m.getValue()); - } - } - - private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {} - - private record CommittedMeta(ObjectMetadataP newMeta, int size) {} - - private record Deleted(JObject handle) {} - } -} +//package com.usatiuk.dhfs.objects; +// +//import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; +//import com.usatiuk.dhfs.utils.VoidFn; +//import io.quarkus.logging.Log; +//import io.quarkus.runtime.ShutdownEvent; +//import io.quarkus.runtime.StartupEvent; +//import jakarta.annotation.Priority; +//import jakarta.enterprise.context.ApplicationScoped; +//import jakarta.enterprise.event.Observes; +//import jakarta.inject.Inject; +//import lombok.Getter; +//import org.apache.commons.lang3.concurrent.BasicThreadFactory; +//import org.eclipse.microprofile.config.inject.ConfigProperty; +// +//import java.util.*; +//import java.util.concurrent.ConcurrentLinkedQueue; +//import java.util.concurrent.CountDownLatch; +//import java.util.concurrent.ExecutorService; +//import 
java.util.concurrent.Executors; +//import java.util.concurrent.atomic.AtomicLong; +//import java.util.stream.Collectors; +//import java.util.stream.Stream; +// +//@ApplicationScoped +//public class TxWritebackImpl implements TxWriteback { +// private final LinkedList _pendingBundles = new LinkedList<>(); +// private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); +// +// private final Object _flushWaitSynchronizer = new Object(); +// private final AtomicLong _lastWrittenTx = new AtomicLong(-1); +// private final AtomicLong _counter = new AtomicLong(); +// private final AtomicLong _waitedTotal = new AtomicLong(0); +// @Inject +// ObjectPersistentStore objectPersistentStore; +// @ConfigProperty(name = "dhfs.objects.writeback.limit") +// long sizeLimit; +// private long currentSize = 0; +// private ExecutorService _writebackExecutor; +// private ExecutorService _commitExecutor; +// private ExecutorService _statusExecutor; +// private volatile boolean _ready = false; +// +// void init(@Observes @Priority(110) StartupEvent event) { +// { +// BasicThreadFactory factory = new BasicThreadFactory.Builder() +// .namingPattern("tx-writeback-%d") +// .build(); +// +// _writebackExecutor = Executors.newSingleThreadExecutor(factory); +// _writebackExecutor.submit(this::writeback); +// } +// +// { +// BasicThreadFactory factory = new BasicThreadFactory.Builder() +// .namingPattern("writeback-commit-%d") +// .build(); +// +// _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); +// } +// _statusExecutor = Executors.newSingleThreadExecutor(); +// _statusExecutor.submit(() -> { +// try { +// while (true) { +// Thread.sleep(1000); +// if (currentSize > 0) +// Log.info("Tx commit status: size=" +// + currentSize / 1024 / 1024 + "MB"); +// } +// } catch (InterruptedException ignored) { +// } +// }); +// _ready = true; +// } +// +// void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { +// Log.info("Waiting for all transactions to drain"); +// +// synchronized (_flushWaitSynchronizer) { +// _ready = false; +// while (currentSize > 0) { +// _flushWaitSynchronizer.wait(); +// } +// } +// +// _writebackExecutor.shutdownNow(); +// Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); +// } +// +// private void verifyReady() { +// if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); +// } +// +// private void writeback() { +// while (!Thread.interrupted()) { +// try { +// TxBundle bundle = new TxBundle(0); +// synchronized (_pendingBundles) { +// while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) +// _pendingBundles.wait(); +// +// long diff = 0; +// while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { +// var toCompress = _pendingBundles.poll(); +// diff -= toCompress.calculateTotalSize(); +// bundle.compress(toCompress); +// } +// diff += bundle.calculateTotalSize(); +// synchronized (_flushWaitSynchronizer) { +// currentSize += diff; +// } +// } +// +// var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size()); +// ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); +// +// for (var c : bundle._committed.values()) { +// _commitExecutor.execute(() -> { +// try { +// Log.trace("Writing new " + c.newMeta.getName()); +// objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData); +// } catch (Throwable t) { +// Log.error("Error writing " + c.newMeta.getName(), t); +// 
errors.add(t); +// } finally { +// latch.countDown(); +// } +// }); +// } +// for (var c : bundle._meta.values()) { +// _commitExecutor.execute(() -> { +// try { +// Log.trace("Writing (meta) " + c.newMeta.getName()); +// objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta); +// } catch (Throwable t) { +// Log.error("Error writing " + c.newMeta.getName(), t); +// errors.add(t); +// } finally { +// latch.countDown(); +// } +// }); +// } +// if (Log.isDebugEnabled()) +// for (var d : bundle._deleted.keySet()) +// Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests +// +// latch.await(); +// if (!errors.isEmpty()) { +// throw new RuntimeException("Errors in writeback!"); +// } +// objectPersistentStore.commitTx( +// new TxManifest( +// Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()), +// bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)), +// bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new)) +// )); +// Log.trace("Bundle " + bundle.getId() + " committed"); +// +// +// List> callbacks = new ArrayList<>(); +// synchronized (_notFlushedBundles) { +// _lastWrittenTx.set(bundle.getId()); +// while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { +// callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); +// } +// } +// callbacks.forEach(l -> l.forEach(VoidFn::apply)); +// +// synchronized (_flushWaitSynchronizer) { +// currentSize -= ((TxBundle) bundle).calculateTotalSize(); +// // FIXME: +// if (currentSize <= sizeLimit || !_ready) +// _flushWaitSynchronizer.notifyAll(); +// } +// } catch (InterruptedException ignored) { +// } catch (Exception e) { +// Log.error("Uncaught exception in writeback", e); +// } catch (Throwable o) { +// Log.error("Uncaught THROWABLE in writeback", o); +// } +// } +// Log.info("Writeback thread exiting"); +// } +// +// @Override +// public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() { +// verifyReady(); +// boolean wait = false; +// while (true) { +// if (wait) { +// synchronized (_flushWaitSynchronizer) { +// long started = System.currentTimeMillis(); +// while (currentSize > sizeLimit) { +// try { +// _flushWaitSynchronizer.wait(); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// long waited = System.currentTimeMillis() - started; +// _waitedTotal.addAndGet(waited); +// if (Log.isTraceEnabled()) +// Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); +// wait = false; +// } +// } +// synchronized (_pendingBundles) { +// synchronized (_flushWaitSynchronizer) { +// if (currentSize > sizeLimit) { +// if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { +// var target = _pendingBundles.poll(); +// +// long diff = -target.calculateTotalSize(); +// while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { +// var toCompress = _pendingBundles.poll(); +// diff -= toCompress.calculateTotalSize(); +// target.compress(toCompress); +// } +// diff += target.calculateTotalSize(); +// currentSize += diff; +// _pendingBundles.addFirst(target); +// } +// } +// +// if (currentSize > sizeLimit) { +// wait = true; +// continue; +// } +// } +// synchronized (_notFlushedBundles) { +// var bundle = new TxBundle(_counter.incrementAndGet()); +// 
_pendingBundles.addLast(bundle); +// _notFlushedBundles.put(bundle.getId(), bundle); +// return bundle; +// } +// } +// } +// } +// +// @Override +// public void commitBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { +// verifyReady(); +// synchronized (_pendingBundles) { +// ((TxBundle) bundle).setReady(); +// if (_pendingBundles.peek() == bundle) +// _pendingBundles.notify(); +// synchronized (_flushWaitSynchronizer) { +// currentSize += ((TxBundle) bundle).calculateTotalSize(); +// } +// } +// } +// +// @Override +// public void dropBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { +// verifyReady(); +// synchronized (_pendingBundles) { +// Log.warn("Dropped bundle: " + bundle); +// _pendingBundles.remove((TxBundle) bundle); +// synchronized (_flushWaitSynchronizer) { +// currentSize -= ((TxBundle) bundle).calculateTotalSize(); +// } +// } +// } +// +// @Override +// public void fence(long bundleId) { +// var latch = new CountDownLatch(1); +// asyncFence(bundleId, latch::countDown); +// try { +// latch.await(); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// +// @Override +// public void asyncFence(long bundleId, VoidFn fn) { +// verifyReady(); +// if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); +// if (_lastWrittenTx.get() >= bundleId) { +// fn.apply(); +// return; +// } +// synchronized (_notFlushedBundles) { +// if (_lastWrittenTx.get() >= bundleId) { +// fn.apply(); +// return; +// } +// _notFlushedBundles.get(bundleId).addCallback(fn); +// } +// } +// +// @Getter +// private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest { +// private final ArrayList _written; +// private final ArrayList _deleted; +// +// private TxManifest(ArrayList written, ArrayList deleted) { +// _written = written; +// _deleted = deleted; +// } +// } +// +// private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle { +// private final HashMap, CommittedEntry> _committed = new HashMap<>(); +// private final HashMap, CommittedMeta> _meta = new HashMap<>(); +// private final HashMap, Integer> _deleted = new HashMap<>(); +// private final ArrayList _callbacks = new ArrayList<>(); +// private long _txId; +// @Getter +// private volatile boolean _ready = false; +// private long _size = -1; +// private boolean _wasCommitted = false; +// +// private TxBundle(long txId) {_txId = txId;} +// +// @Override +// public long getId() { +// return _txId; +// } +// +// public void setReady() { +// _ready = true; +// } +// +// public void addCallback(VoidFn callback) { +// synchronized (_callbacks) { +// if (_wasCommitted) throw new IllegalStateException(); +// _callbacks.add(callback); +// } +// } +// +// public List setCommitted() { +// synchronized (_callbacks) { +// _wasCommitted = true; +// return Collections.unmodifiableList(_callbacks); +// } +// } +// +// @Override +// public void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data) { +// synchronized (_committed) { +// _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize())); +// } +// } +// +// @Override +// public void commitMetaChange(JObject obj, ObjectMetadataP meta) { +// synchronized (_meta) { +// _meta.put(obj, new CommittedMeta(meta, obj.estimateSize())); +// } +// } +// +// @Override +// public void delete(JObject obj) { +// synchronized (_deleted) { +// _deleted.put(obj, obj.estimateSize()); +// } +// } +// +// +// public long calculateTotalSize() { +// if (_size >= 0) return _size; +// long out = 
0; +// for (var c : _committed.values()) +// out += c.size; +// for (var c : _meta.values()) +// out += c.size; +// for (var c : _deleted.entrySet()) +// out += c.getValue(); +// _size = out; +// return _size; +// } +// +// public void compress(TxBundle other) { +// if (_txId >= other._txId) +// throw new IllegalArgumentException("Compressing an older bundle into newer"); +// +// _txId = other._txId; +// _size = -1; +// +// for (var d : other._deleted.entrySet()) { +// _committed.remove(d.getKey()); +// _meta.remove(d.getKey()); +// _deleted.put(d.getKey(), d.getValue()); +// } +// +// for (var c : other._committed.entrySet()) { +// _committed.put(c.getKey(), c.getValue()); +// _meta.remove(c.getKey()); +// _deleted.remove(c.getKey()); +// } +// +// for (var m : other._meta.entrySet()) { +// var deleted = _deleted.remove(m.getKey()); +// if (deleted != null) { +// _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize())); +// continue; +// } +// var committed = _committed.remove(m.getKey()); +// if (committed != null) { +// _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize())); +// continue; +// } +// _meta.put(m.getKey(), m.getValue()); +// } +// } +// +// private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {} +// +// private record CommittedMeta(ObjectMetadataP newMeta, int size) {} +// +// private record Deleted(JObject handle) {} +// } +//} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index b88b3cac..ed3cbef7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -8,6 +8,8 @@ import com.usatiuk.dhfs.utils.ByteUtils; import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; +import io.quarkus.arc.lookup.LookupIfProperty; +import io.quarkus.arc.properties.IfBuildProperty; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; @@ -43,6 +45,7 @@ import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; // rest of metadata @ApplicationScoped +@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "files") public class FileObjectPersistentStore implements ObjectPersistentStore { private final Path _root; private final Path _txManifest; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java similarity index 63% rename from dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index 90c5c7c1..41af2572 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/FakeObjectStorage.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -1,8 +1,10 @@ package com.usatiuk.dhfs.objects.persistence; -import com.usatiuk.dhfs.objects.JData; +import com.google.protobuf.ByteString; 
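// The TxWritebackImpl moved into comments above throttles writers through one
// monitor object (_flushWaitSynchronizer): bundle creators wait while the
// pending size exceeds dhfs.objects.writeback.limit, and the writeback thread
// notifies once it drains below it. The backpressure core, reduced to a
// standalone sketch with invented names:

final class WritebackBackpressure {
    private final Object monitor = new Object();
    private final long sizeLimit;
    private long currentSize = 0;

    WritebackBackpressure(long sizeLimit) {
        this.sizeLimit = sizeLimit;
    }

    void submit(long size) throws InterruptedException {
        synchronized (monitor) {
            while (currentSize > sizeLimit) monitor.wait(); // block the producer
            currentSize += size;
        }
    }

    void drained(long size) {
        synchronized (monitor) {
            currentSize -= size;
            if (currentSize <= sizeLimit) monitor.notifyAll(); // release waiters
        }
    }
}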
import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.test.objs.TestData; +import io.quarkus.arc.lookup.LookupIfProperty; +import io.quarkus.arc.properties.IfBuildProperty; +import jakarta.enterprise.context.ApplicationScoped; import javax.annotation.Nonnull; import java.util.Collection; @@ -10,9 +12,11 @@ import java.util.HashMap; import java.util.Map; import java.util.Optional; -public class FakeObjectStorage implements ObjectPersistentStore { - private final Map _objects = new HashMap<>(); - private final Map _pending = new HashMap<>(); +@ApplicationScoped +@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory") +public class MemoryObjectPersistentStore implements ObjectPersistentStore { + private final Map _objects = new HashMap<>(); + private final Map _pending = new HashMap<>(); @Nonnull @Override @@ -24,16 +28,16 @@ public class FakeObjectStorage implements ObjectPersistentStore { @Nonnull @Override - public Optional readObject(JObjectKey name) { + public Optional readObject(JObjectKey name) { synchronized (this) { return Optional.ofNullable(_objects.get(name)); } } @Override - public void writeObject(JObjectKey name, JData object) { + public void writeObject(JObjectKey name, ByteString object) { synchronized (this) { - _pending.put(name, (TestData) object); + _pending.put(name, object); } } @@ -49,13 +53,6 @@ public class FakeObjectStorage implements ObjectPersistentStore { } } - @Override - public void deleteObjectDirect(JObjectKey name) { - synchronized (this) { - _objects.remove(name); - } - } - @Override public long getTotalSpace() { return 0; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 44367977..c0f8c55c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -3,12 +3,14 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.ObjectAllocator; +import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import lombok.AccessLevel; import lombok.Getter; import java.util.*; +@ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { @Inject ObjectAllocator objectAllocator; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 6a65b186..b3fd7649 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -1,97 +1,150 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.dhfs.objects.persistence.FakeObjectStorage; -import com.usatiuk.dhfs.objects.test.objs.Kid; -import com.usatiuk.dhfs.objects.test.objs.Parent; +import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import io.quarkus.logging.Log; +import io.quarkus.test.junit.QuarkusTest; +import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; + +@QuarkusTest public class 
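// With @IfBuildProperty(name = "dhfs.objects.persistence", ...), exactly one
// store bean (file-backed or in-memory) is built into the application, and the
// test profile selects "memory" via application.properties. A reduced sketch of
// the in-memory store's two-phase write, with byte[] standing in for ByteString
// and a plain String key (simplifications, not the real interface):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

final class InMemoryStore {
    private final Map<String, byte[]> objects = new HashMap<>();
    private final Map<String, byte[]> pending = new HashMap<>();

    synchronized void writeObject(String key, byte[] data) {
        pending.put(key, data);              // staged, not yet visible to readers
    }

    synchronized Optional<byte[]> readObject(String key) {
        return Optional.ofNullable(objects.get(key));
    }

    // commitTx atomically publishes the staged writes named in the manifest.
    synchronized void commitTx(List<String> written, List<String> deleted) {
        for (var k : written) {
            var staged = pending.remove(k);
            if (staged != null) objects.put(k, staged);
        }
        deleted.forEach(objects::remove);
    }
}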
ObjectsTest { - private final FakeObjectStorage _storage = new FakeObjectStorage(); - private final JObjectManager _tx = new JObjectManager(_storage); + @Inject + TransactionManager txm; + + @Inject + CurrentTransaction curTx; + + @Inject + ObjectAllocator alloc; @Test void createObject() { { - var tx = _tx.beginTransaction(); - var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); - parent.setName("John"); - tx.commit(); + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + newParent.setLastName("John"); + curTx.putObject(newParent); + txm.commit(); } { - var tx2 = _tx.beginTransaction(); - var parent = tx2.getObject(new JObjectKey("Parent")); - Assertions.assertInstanceOf(Parent.class, parent); - Assertions.assertEquals("John", ((Parent) parent).getName()); + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent"), LockingStrategy.READ_ONLY).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + txm.commit(); } } @Test - void createObjectConflict() { - { - var tx = _tx.beginTransaction(); - var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); - parent.setName("John"); + void createObjectConflict() throws InterruptedException { + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); - var tx2 = _tx.beginTransaction(); - var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); - parent2.setName("John"); + var signal = new Semaphore(0); - tx.commit(); - Assertions.assertThrows(Exception.class, tx2::commit); + new Thread(() -> { + Log.warn("Thread 1"); + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); + newParent.setLastName("John"); + curTx.putObject(newParent); + try { + signal.acquire(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + Log.warn("Thread 1 commit"); + txm.commit(); + thread1Failed.set(false); + }).start(); + + new Thread(() -> { + Log.warn("Thread 2"); + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); + newParent.setLastName("John2"); + curTx.putObject(newParent); + try { + signal.acquire(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + Log.warn("Thread 2 commit"); + txm.commit(); + thread2Failed.set(false); + }).start(); + + signal.release(2); + + Thread.sleep(500); + + txm.begin(); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ_ONLY).orElse(null); + + if (!thread1Failed.get()) { + Assertions.assertTrue(thread2Failed.get()); + Assertions.assertEquals("John", got.getLastName()); + } else if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.getLastName()); + } else { + Assertions.fail("No thread succeeded"); } } - @Test - void editConflict() { - { - var tx = _tx.beginTransaction(); - var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); - parent.setName("John"); - tx.commit(); - } - - { - var tx = _tx.beginTransaction(); - var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); - parent.setName("John2"); - - var tx2 = _tx.beginTransaction(); - var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); - parent2.setName("John3"); - - tx.commit(); - Assertions.assertThrows(Exception.class, tx2::commit); - } - - { - var tx2 = _tx.beginTransaction(); - var parent = tx2.getObject(new JObjectKey("Parent")); - Assertions.assertInstanceOf(Parent.class, parent); - 
Assertions.assertEquals("John2", ((Parent) parent).getName()); - } - } - - @Test - void nestedCreate() { - { - var tx = _tx.beginTransaction(); - var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); - var kid = tx.getObject(new JObjectKey("Kid"), Kid.class); - parent.setName("John"); - kid.setName("KidName"); - parent.setKidKey(kid.getKey()); - tx.commit(); - } - - { - var tx2 = _tx.beginTransaction(); - var parent = tx2.getObject(new JObjectKey("Parent")); - Assertions.assertInstanceOf(Parent.class, parent); - Assertions.assertEquals("John", ((Parent) parent).getName()); - Assertions.assertEquals("KidName", ((Parent) parent).getKid().getName()); - } - } +// @Test +// void editConflict() { +// { +// var tx = _tx.beginTransaction(); +// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); +// parent.setName("John"); +// tx.commit(); +// } +// +// { +// var tx = _tx.beginTransaction(); +// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); +// parent.setName("John2"); +// +// var tx2 = _tx.beginTransaction(); +// var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); +// parent2.setName("John3"); +// +// tx.commit(); +// Assertions.assertThrows(Exception.class, tx2::commit); +// } +// +// { +// var tx2 = _tx.beginTransaction(); +// var parent = tx2.getObject(new JObjectKey("Parent")); +// Assertions.assertInstanceOf(Parent.class, parent); +// Assertions.assertEquals("John2", ((Parent) parent).getName()); +// } +// } +// +// @Test +// void nestedCreate() { +// { +// var tx = _tx.beginTransaction(); +// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); +// var kid = tx.getObject(new JObjectKey("Kid"), Kid.class); +// parent.setName("John"); +// kid.setName("KidName"); +// parent.setKidKey(kid.getKey()); +// tx.commit(); +// } +// +// { +// var tx2 = _tx.beginTransaction(); +// var parent = tx2.getObject(new JObjectKey("Parent")); +// Assertions.assertInstanceOf(Parent.class, parent); +// Assertions.assertEquals("John", ((Parent) parent).getName()); +// Assertions.assertEquals("KidName", ((Parent) parent).getKid().getName()); +// } +// } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java new file mode 100644 index 00000000..597d944c --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java @@ -0,0 +1,14 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.ObjectAllocator; +import lombok.Getter; + +public abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData { + @Getter + private boolean _modified = false; + + protected void onChange() { + _modified = true; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java new file mode 100644 index 00000000..3daee93b --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java @@ -0,0 +1,33 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.data.Kid; +import lombok.Getter; + +public class KidDataCT extends ChangeTrackerBase implements Kid { + private final JObjectKey _key; + + @Getter + private String _name; + + @Override + public void 
setName(String name) { + _name = name; + onChange(); + } + + public KidDataCT(KidDataNormal normal) { + _key = normal.getKey(); + _name = normal.getName(); + } + + @Override + public JObjectKey getKey() { + return _key; + } + + @Override + public Kid wrapped() { + return this; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java new file mode 100644 index 00000000..600ec787 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java @@ -0,0 +1,23 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.data.Kid; +import lombok.Getter; +import lombok.Setter; + +import java.io.Serializable; + +public class KidDataNormal implements Kid, Serializable { + private final JObjectKey _key; + + @Getter + @Setter + private String _name; + + public KidDataNormal(JObjectKey key) {_key = key;} + + @Override + public JObjectKey getKey() { + return _key; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java new file mode 100644 index 00000000..240624fe --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java @@ -0,0 +1,40 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.data.Parent; +import lombok.Getter; + +public class ParentDataCT extends ChangeTrackerBase implements Parent { + @Getter + private JObjectKey _name; + @Getter + private JObjectKey _kidKey; + @Getter + private String _lastName; + + public void setKidKey(JObjectKey key) { + _kidKey = key; + onChange(); + } + + public void setLastName(String lastName) { + _lastName = lastName; + onChange(); + } + + public ParentDataCT(ParentDataNormal normal) { + _name = normal.getKey(); + _kidKey = normal.getKidKey(); + _lastName = normal.getLastName(); + } + + @Override + public JObjectKey getKey() { + return _name; + } + + @Override + public Parent wrapped() { + return this; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java new file mode 100644 index 00000000..77943cf6 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java @@ -0,0 +1,29 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.data.Parent; +import lombok.Getter; +import lombok.Setter; + +import java.io.Serializable; + +public class ParentDataNormal implements Parent, Serializable { + @Getter + private JObjectKey _name; + @Getter + @Setter + private JObjectKey _kidKey; + @Getter + @Setter + private String _lastName; + + public ParentDataNormal(JObjectKey name) { + _name = name; + } + + @Override + public JObjectKey getKey() { + return _name; + } + +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java similarity index 93% rename from dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java rename to 
dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java index 0bf25df1..e34db09d 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/TestData.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java @@ -1,4 +1,4 @@ -package com.usatiuk.dhfs.objects.test.objs; +package com.usatiuk.dhfs.objects.allocator; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java new file mode 100644 index 00000000..a117e80d --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java @@ -0,0 +1,40 @@ +package com.usatiuk.dhfs.objects.allocator; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.ObjectAllocator; +import com.usatiuk.dhfs.objects.data.Kid; +import com.usatiuk.dhfs.objects.data.Parent; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class TestObjectAllocator implements ObjectAllocator { + @Override + public T create(Class type, JObjectKey key) { + if (type == Kid.class) { + return type.cast(new KidDataNormal(key)); + } else if (type == Parent.class) { + return type.cast(new ParentDataNormal(key)); + } else { + throw new IllegalArgumentException("Unknown type: " + type); + } + } + + @Override + public ChangeTrackingJData copy(T obj) { + if (obj instanceof ChangeTrackerBase) { + throw new IllegalArgumentException("Cannot copy a ChangeTrackerBase object"); + } + + return switch (obj) { + case KidDataNormal kid -> (ChangeTrackingJData) new KidDataCT(kid); + case ParentDataNormal parent -> (ChangeTrackingJData) new ParentDataCT(parent); + default -> throw new IllegalStateException("Unexpected value: " + obj); + }; + } + + @Override + public T unmodifiable(T obj) { + return obj; // TODO: + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java new file mode 100644 index 00000000..9c3df9db --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects.data; + +import com.usatiuk.dhfs.objects.JData; + +public interface Kid extends JData { + String getName(); + + void setName(String name); +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java new file mode 100644 index 00000000..1067ea5d --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -0,0 +1,15 @@ +package com.usatiuk.dhfs.objects.data; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public interface Parent extends JData { + JObjectKey getName(); + + String getLastName(); + void setLastName(String lastName); + + JObjectKey getKidKey(); + + void setKidKey(JObjectKey kid); +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java new file mode 100644 index 00000000..388ffccd --- /dev/null +++ 
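// TestObjectAllocator above maps a requested interface to its plain
// implementation in create() and picks the change-tracking wrapper in copy()
// with a Java 21 pattern-matching switch. The dispatch shape, reduced to
// invented types (Entity, Cat, Dog), not this repo's classes:

interface Entity {
    String key();
}

record Cat(String key) implements Entity {}

record Dog(String key) implements Entity {}

final class Allocator {
    <T extends Entity> T create(Class<T> type, String key) {
        if (type == Cat.class) return type.cast(new Cat(key));
        if (type == Dog.class) return type.cast(new Dog(key));
        throw new IllegalArgumentException("Unknown type: " + type);
    }

    String describe(Entity e) {
        // Pattern-matching switch dispatches on the runtime type, as copy() does.
        return switch (e) {
            case Cat c -> "cat:" + c.key();
            case Dog d -> "dog:" + d.key();
            default -> throw new IllegalStateException("Unexpected value: " + e);
        };
    }
}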
b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java @@ -0,0 +1,23 @@ +package com.usatiuk.dhfs.objects.serializer; + + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.ObjectSerializer; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.enterprise.context.ApplicationScoped; + +import java.io.Serializable; + +@ApplicationScoped +public class TestJDataSerializer implements ObjectSerializer { + @Override + public ByteString serialize(JData obj) { + return SerializationHelper.serialize((Serializable) obj); + } + + @Override + public JData deserialize(ByteString data) { + return SerializationHelper.deserialize(data.toByteArray()); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java deleted file mode 100644 index b5696f25..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Kid.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObject; -import com.usatiuk.dhfs.objects.transaction.Transaction; - -public class Kid extends JObject { - - public Kid(Transaction Transaction, KidData data) { - super(Transaction, data); - } - - @Override - public JData getData() { - return _data; - } - -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java deleted file mode 100644 index bf49e1e7..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidData.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObject; -import com.usatiuk.dhfs.objects.transaction.Transaction; - -import java.util.function.Function; - -public interface KidData extends JData { - String getName(); - - void setName(String name); - - KidData bindCopy(); - - default Function binder(boolean isLocked) { - return jo -> new Kid(jo, bindCopy()); - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java deleted file mode 100644 index 48b8baf6..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/KidDataImpl.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JObjectKey; - -public class KidDataImpl extends TestData implements KidData { - private String _name; - - public KidDataImpl(long version, JObjectKey key, String name) { - super(version, key); - _name = name; - } - - @Override - public String getName() { - return _name; - } - - @Override - public void setName(String name) { - _name = name; - onChanged(); - } - - @Override - public KidDataImpl copy() { - return new KidDataImpl(isChanged() ? 
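// TestJDataSerializer above delegates both directions to SerializationHelper,
// whose implementation is outside this excerpt; plain JDK object streams are
// one plausible way such a helper works (an assumption, not the project's
// verified code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

final class JdkSerializer {
    static byte[] serialize(Serializable obj) throws IOException {
        var bos = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bos)) {
            oos.writeObject(obj);
        }
        return bos.toByteArray();
    }

    @SuppressWarnings("unchecked")
    static <T> T deserialize(byte[] data) throws IOException, ClassNotFoundException {
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return (T) ois.readObject();
        }
    }
}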
getVersion() + 1 : getVersion(), getKey(), _name); - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java deleted file mode 100644 index 7b90597e..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/Parent.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObject; -import com.usatiuk.dhfs.objects.transaction.Transaction; -import lombok.experimental.Delegate; - -public class Parent extends JObject { - @Delegate - private final ParentData _data; - - public Parent(Transaction Transaction, ParentData data) { - super(Transaction); - _data = data; - } - - @Override - public JData getData() { - return _data; - } - - public Kid getKid() { - return _jObjectInterface.getObject(_data.getKidKey(), Kid.class); - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java deleted file mode 100644 index b3f0e76f..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentData.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; - -public interface ParentData extends JData { - String getName(); - - void setName(String name); - - JObjectKey getKidKey(); - - void setKidKey(JObjectKey kid); -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java deleted file mode 100644 index c77a0020..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/test/objs/ParentDataImpl.java +++ /dev/null @@ -1,41 +0,0 @@ -package com.usatiuk.dhfs.objects.test.objs; - -import com.usatiuk.dhfs.objects.JObjectKey; - -public class ParentDataImpl extends TestData implements ParentData { - private String _name; - private JObjectKey _kidKey; - - public ParentDataImpl(long version, JObjectKey key, String name, JObjectKey kidKey) { - super(version, key); - _name = name; - _kidKey = kidKey; - } - - @Override - public String getName() { - return _name; - } - - @Override - public void setName(String name) { - _name = name; - onChanged(); - } - - @Override - public JObjectKey getKidKey() { - return _kidKey; - } - - @Override - public void setKidKey(JObjectKey kid) { - _kidKey = kid; - onChanged(); - } - - @Override - public ParentDataImpl copy() { - return new ParentDataImpl(isChanged() ? 
getVersion() + 1 : getVersion(), getKey(), _name, _kidKey); - } -} diff --git a/dhfs-parent/objects/src/test/resources/application.properties b/dhfs-parent/objects/src/test/resources/application.properties new file mode 100644 index 00000000..1b0d9d26 --- /dev/null +++ b/dhfs-parent/objects/src/test/resources/application.properties @@ -0,0 +1 @@ +dhfs.objects.persistence=memory \ No newline at end of file From 70b810545125e4c9a1d0ac95cca9589b319729b4 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 2 Dec 2024 22:30:52 +0100 Subject: [PATCH 004/105] working object edit --- .../com/usatiuk/dhfs/objects/JObjectKey.java | 4 +- .../usatiuk/dhfs/objects/JObjectManager.java | 10 +- .../dhfs/objects/transaction/TxRecord.java | 2 +- .../java/com/usatiuk/dhfs/objects/Just.java | 15 +++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 100 +++++++++++++----- .../objects/allocator/ChangeTrackerBase.java | 6 +- .../dhfs/objects/allocator/KidDataCT.java | 2 +- .../dhfs/objects/allocator/ParentDataCT.java | 2 +- .../allocator/TestObjectAllocator.java | 10 +- 9 files changed, 109 insertions(+), 42 deletions(-) create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 9927eeee..8cc6c978 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -1,4 +1,6 @@ package com.usatiuk.dhfs.objects; -public record JObjectKey(String name) { +import java.io.Serializable; + +public record JObjectKey(String name) implements Serializable { } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index c2485f1a..3ad37c83 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -188,6 +188,13 @@ public class JObjectManager { if (current.lastWriteTx > tx.getId()) { throw new IllegalStateException("Transaction race"); } + + var newWrapper = new JDataWrapper<>(record.copy().wrapped()); + newWrapper.lock.writeLock().lock(); + if (!_objects.replace(record.copy().wrapped().getKey(), current, newWrapper)) { + throw new IllegalStateException("Object changed during transaction"); + } + toUnlock.add(newWrapper.lock.writeLock()::unlock); } else if (record instanceof TxRecord.TxObjectRecordNew created) { var wrapper = new JDataWrapper<>(created.created()); wrapper.lock.writeLock().lock(); @@ -204,8 +211,7 @@ public class JObjectManager { for (var record : toFlush) { Log.trace("Flushing " + record.toString()); - if (!record.copy().isModified()) - continue; + assert record.copy().isModified(); var obj = record.copy().wrapped(); var key = obj.getKey(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index b5b3250d..0fc6835a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -43,7 +43,7 @@ public class TxRecord { @Override public boolean isModified() { - return false; + return true; } }; } diff --git 
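// The PATCH 004 hunk above publishes each committed copy by building a fresh
// wrapper and calling ConcurrentHashMap.replace(key, current, newWrapper); if a
// concurrent commit already swapped the entry, the compare-and-swap fails and
// the transaction aborts with "Object changed during transaction". The same
// check-then-swap as a standalone sketch (note replace() compares with
// equals(), which for this record is value equality; the patch compares live
// wrapper instances):

import java.util.concurrent.ConcurrentHashMap;

final class CasPublish {
    record Boxed(String value) {}

    private final ConcurrentHashMap<String, Boxed> map = new ConcurrentHashMap<>();

    void put(String key, String value) {
        map.put(key, new Boxed(value));
    }

    Boxed get(String key) {
        return map.get(key);
    }

    // Returns false when another writer won the race, mirroring the
    // "Object changed during transaction" failure in the patch.
    boolean tryReplace(String key, Boxed expected, String newValue) {
        return map.replace(key, expected, new Boxed(newValue));
    }
}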
a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java new file mode 100644 index 00000000..ed5e9d44 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -0,0 +1,15 @@ +package com.usatiuk.dhfs.objects; + +import java.util.concurrent.Callable; + +public abstract class Just { + public static void run(Callable callable) { + new Thread(() -> { + try { + callable.call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).start(); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index b3fd7649..26af7975 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -40,51 +40,92 @@ public class ObjectsTest { } } + @Test + void editObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent3")); + newParent.setLastName("John"); + curTx.putObject(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + parent.setLastName("John2"); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); + Assertions.assertEquals("John2", parent.getLastName()); + parent.setLastName("John3"); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.READ_ONLY).orElse(null); + Assertions.assertEquals("John3", parent.getLastName()); + txm.commit(); + } + } + @Test void createObjectConflict() throws InterruptedException { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); var signal = new Semaphore(0); + var signalFin = new Semaphore(2); - new Thread(() -> { - Log.warn("Thread 1"); - txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); - newParent.setLastName("John"); - curTx.putObject(newParent); - try { - signal.acquire(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - Log.warn("Thread 1 commit"); - txm.commit(); - thread1Failed.set(false); - }).start(); - new Thread(() -> { - Log.warn("Thread 2"); - txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); - newParent.setLastName("John2"); - curTx.putObject(newParent); + Just.run(() -> { try { + signalFin.acquire(); + Log.warn("Thread 1"); + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); + newParent.setLastName("John"); + curTx.putObject(newParent); signal.acquire(); - } catch (InterruptedException e) { - throw new RuntimeException(e); + Log.warn("Thread 1 commit"); + txm.commit(); + thread1Failed.set(false); + signal.release(); + return null; + } finally { + signalFin.release(); } - Log.warn("Thread 2 commit"); - txm.commit(); - thread2Failed.set(false); - }).start(); + }); + Just.run(() -> { + try { + signalFin.acquire(); + Log.warn("Thread 2"); + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); + newParent.setLastName("John2"); + curTx.putObject(newParent); + signal.acquire(); + Log.warn("Thread 2 
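// The reworked test above drops Thread.sleep(500) in favor of two semaphores:
// signal releases both threads into their commits, and signalFin lets the main
// thread join deterministically once both have finished. A small reusable
// harness built on the same idea (invented name RaceHarness):

import java.util.concurrent.Semaphore;

final class RaceHarness {
    static void race(Runnable a, Runnable b) throws InterruptedException {
        var start = new Semaphore(0);
        var done = new Semaphore(0);
        for (var action : new Runnable[]{a, b}) {
            new Thread(() -> {
                try {
                    start.acquire();      // block until the race is released
                    action.run();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.release();       // always signal completion
                }
            }).start();
        }
        start.release(2);                 // let both threads run
        done.acquire(2);                  // deterministic join, no sleep needed
    }
}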
commit"); + txm.commit(); + thread2Failed.set(false); + signal.release(); + return null; + } finally { + signalFin.release(); + } + }); signal.release(2); - - Thread.sleep(500); + signalFin.acquire(2); txm.begin(); var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ_ONLY).orElse(null); + txm.commit(); if (!thread1Failed.get()) { Assertions.assertTrue(thread2Failed.get()); @@ -94,6 +135,7 @@ public class ObjectsTest { } else { Assertions.fail("No thread succeeded"); } + } // @Test diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java index 597d944c..1f0a302a 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java @@ -4,9 +4,11 @@ import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.ObjectAllocator; import lombok.Getter; -public abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData { +import java.io.Serializable; + +public abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData, Serializable { @Getter - private boolean _modified = false; + private transient boolean _modified = false; protected void onChange() { _modified = true; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java index 3daee93b..adf74a49 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java @@ -16,7 +16,7 @@ public class KidDataCT extends ChangeTrackerBase implements Kid { onChange(); } - public KidDataCT(KidDataNormal normal) { + public KidDataCT(Kid normal) { _key = normal.getKey(); _name = normal.getName(); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java index 240624fe..106f61c7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java @@ -22,7 +22,7 @@ public class ParentDataCT extends ChangeTrackerBase implements Parent { onChange(); } - public ParentDataCT(ParentDataNormal normal) { + public ParentDataCT(Parent normal) { _name = normal.getKey(); _kidKey = normal.getKidKey(); _lastName = normal.getLastName(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java index a117e80d..d18c1a83 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java @@ -22,13 +22,13 @@ public class TestObjectAllocator implements ObjectAllocator { @Override public ChangeTrackingJData copy(T obj) { - if (obj instanceof ChangeTrackerBase) { - throw new IllegalArgumentException("Cannot copy a ChangeTrackerBase object"); - } +// if (obj instanceof ChangeTrackerBase) { +// throw new IllegalArgumentException("Cannot copy a 
ChangeTrackerBase object"); +// } return switch (obj) { - case KidDataNormal kid -> (ChangeTrackingJData) new KidDataCT(kid); - case ParentDataNormal parent -> (ChangeTrackingJData) new ParentDataCT(parent); + case Kid kid -> (ChangeTrackingJData) new KidDataCT(kid); + case Parent parent -> (ChangeTrackingJData) new ParentDataCT(parent); default -> throw new IllegalStateException("Unexpected value: " + obj); }; } From ce2822595b0fe910f45b73c5063b8e8716ba8c8a Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 2 Dec 2024 23:06:20 +0100 Subject: [PATCH 005/105] working object edit 2 --- .../usatiuk/dhfs/objects/JObjectManager.java | 23 +-- .../transaction/TransactionFactoryImpl.java | 43 +++-- .../transaction/TransactionObjectSource.java | 4 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 169 ++++++++++++++---- .../src/test/resources/application.properties | 4 +- 5 files changed, 174 insertions(+), 69 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 3ad37c83..2ecb5a99 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -92,23 +92,16 @@ public class JObjectManager { } } + private record TransactionObjectImpl + (T data, ReadWriteLock lock) + implements TransactionObjectSource.TransactionObject {} + private final TransactionObjectSource _objSource = new TransactionObjectSource() { @Override public Optional> get(Class type, JObjectKey key) { var got = JObjectManager.this.get(type, key); if (got == null) return Optional.empty(); - return Optional.of(new TransactionObject<>() { - @Override - public T get() { - return got.getLeft(); - } - - @Override - public ReadWriteLock getLock() { - return got.getRight().lock; - } - }); - + return Optional.of(new TransactionObjectImpl<>(got.getLeft(), got.getRight().lock)); } }; @@ -131,10 +124,10 @@ public class JObjectManager { Log.trace("Processing entry " + entry.toString()); switch (entry) { case TxRecord.TxObjectRecordRead read -> { - toUnlock.add(read.original().getLock().readLock()::unlock); + toUnlock.add(read.original().lock().readLock()::unlock); } case TxRecord.TxObjectRecordCopyLock copy -> { - toUnlock.add(copy.original().getLock().writeLock()::unlock); + toUnlock.add(copy.original().lock().writeLock()::unlock); if (copy.copy().isModified()) { toFlush.add(copy); } @@ -176,7 +169,7 @@ public class JObjectManager { throw new IllegalStateException("Object not found during transaction"); } else if (current != null) { var old = switch (record) { - case TxRecord.TxObjectRecordCopyLock copy -> copy.original().get(); + case TxRecord.TxObjectRecordCopyLock copy -> copy.original().data(); case TxRecord.TxObjectRecordCopyNoLock copy -> copy.original(); default -> throw new IllegalStateException("Unexpected value: " + record); }; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index c0f8c55c..5de8255a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -49,27 +49,40 @@ public class TransactionFactoryImpl implements TransactionFactory { switch (strategy) { 
case READ_ONLY: { - read.getLock().readLock().lock(); - var view = objectAllocator.unmodifiable(read.get()); + read.lock().readLock().lock(); + var view = objectAllocator.unmodifiable(read.data()); _objects.put(key, new TxRecord.TxObjectRecordRead<>(read, view)); return Optional.of(view); } - case WRITE: case OPTIMISTIC: { - var copy = objectAllocator.copy(read.get()); - - switch (strategy) { - case WRITE: - read.getLock().writeLock().lock(); - _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(read, copy)); - break; - case OPTIMISTIC: - _objects.put(key, new TxRecord.TxObjectRecordCopyNoLock<>(read.get(), copy)); - break; - } - + var copy = objectAllocator.copy(read.data()); + _objects.put(key, new TxRecord.TxObjectRecordCopyNoLock<>(read.data(), copy)); return Optional.of(copy.wrapped()); } + case WRITE: { + read.lock().writeLock().lock(); + while (true) { + try { + var readAgain = _source.get(type, key).orElse(null); + if (readAgain == null) { + read.lock().writeLock().unlock(); + return Optional.empty(); + } + if (!Objects.equals(read, readAgain)) { + read.lock().writeLock().unlock(); + read = readAgain; + read.lock().writeLock().lock(); + continue; + } + var copy = objectAllocator.copy(read.data()); + _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(read, copy)); + return Optional.of(copy.wrapped()); + } catch (Throwable e) { + read.lock().writeLock().unlock(); + throw e; + } + } + } default: throw new IllegalArgumentException("Unknown locking strategy"); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 55301c9c..2526bcd4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -8,9 +8,9 @@ import java.util.concurrent.locks.ReadWriteLock; public interface TransactionObjectSource { interface TransactionObject { - T get(); + T data(); - ReadWriteLock getLock(); + ReadWriteLock lock(); } Optional> get(Class type, JObjectKey key); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 26af7975..777e3045 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -8,6 +8,7 @@ import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; @@ -80,12 +81,10 @@ public class ObjectsTest { AtomicBoolean thread2Failed = new AtomicBoolean(true); var signal = new Semaphore(0); - var signalFin = new Semaphore(2); - + var latch = new CountDownLatch(2); Just.run(() -> { try { - signalFin.acquire(); Log.warn("Thread 1"); txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); @@ -98,12 +97,11 @@ public class ObjectsTest { signal.release(); return null; } finally { - signalFin.release(); + latch.countDown(); } }); Just.run(() -> { try { - signalFin.acquire(); Log.warn("Thread 2"); txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); @@ -116,12 +114,12 @@ public class ObjectsTest { signal.release(); 
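// (test note) Both threads race to create the same "Parent2" key; the assertions
// below expect exactly one of the two commits to survive the conflict.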
return null; } finally { - signalFin.release(); + latch.countDown(); } }); signal.release(2); - signalFin.acquire(2); + latch.await(); txm.begin(); var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ_ONLY).orElse(null); @@ -135,37 +133,136 @@ public class ObjectsTest { } else { Assertions.fail("No thread succeeded"); } - } -// @Test -// void editConflict() { -// { -// var tx = _tx.beginTransaction(); -// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); -// parent.setName("John"); -// tx.commit(); -// } -// -// { -// var tx = _tx.beginTransaction(); -// var parent = tx.getObject(new JObjectKey("Parent"), Parent.class); -// parent.setName("John2"); -// -// var tx2 = _tx.beginTransaction(); -// var parent2 = tx2.getObject(new JObjectKey("Parent"), Parent.class); -// parent2.setName("John3"); -// -// tx.commit(); -// Assertions.assertThrows(Exception.class, tx2::commit); -// } -// -// { -// var tx2 = _tx.beginTransaction(); -// var parent = tx2.getObject(new JObjectKey("Parent")); -// Assertions.assertInstanceOf(Parent.class, parent); -// Assertions.assertEquals("John2", ((Parent) parent).getName()); -// } + @Test + void editConflict() throws InterruptedException { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent4")); + newParent.setLastName("John3"); + curTx.putObject(newParent); + txm.commit(); + } + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var signal = new Semaphore(0); + var latch = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.OPTIMISTIC).orElse(null); + parent.setLastName("John"); + signal.acquire(); + Log.warn("Thread 1 commit"); + txm.commit(); + thread1Failed.set(false); + signal.release(); + return null; + } finally { + latch.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.OPTIMISTIC).orElse(null); + parent.setLastName("John2"); + signal.acquire(); + Log.warn("Thread 2 commit"); + txm.commit(); + thread2Failed.set(false); + signal.release(); + return null; + } finally { + latch.countDown(); + } + }); + + signal.release(2); + latch.await(); + + txm.begin(); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.READ_ONLY).orElse(null); + txm.commit(); + + if (!thread1Failed.get()) { + Assertions.assertTrue(thread2Failed.get()); + Assertions.assertEquals("John", got.getLastName()); + } else if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.getLastName()); + } else { + Assertions.fail("No thread succeeded"); + } + + Assertions.assertTrue(thread1Failed.get() || thread2Failed.get()); + } + + @Test + void editLock() throws InterruptedException { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent5")); + newParent.setLastName("John3"); + curTx.putObject(newParent); + txm.commit(); + } + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var signal = new Semaphore(0); + var latch = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); + parent.setLastName("John"); + 
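// (test note) With LockingStrategy.WRITE the competing thread blocks inside
// getObject until this transaction commits, so, unlike the OPTIMISTIC
// editConflict test above, both commits are expected to succeed here.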
signal.acquire(); + Log.warn("Thread 1 commit"); + txm.commit(); + thread1Failed.set(false); + signal.release(); + return null; + } finally { + latch.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); + parent.setLastName("John2"); + signal.acquire(); + Log.warn("Thread 2 commit"); + txm.commit(); + thread2Failed.set(false); + signal.release(); + return null; + } finally { + latch.countDown(); + } + }); + + signal.release(2); + latch.await(); + + txm.begin(); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.READ_ONLY).orElse(null); + txm.commit(); + + Assertions.assertTrue(!thread1Failed.get() && !thread2Failed.get()); + Assertions.assertTrue(got.getLastName().equals("John") || got.getLastName().equals("John2")); + } + // } // // @Test diff --git a/dhfs-parent/objects/src/test/resources/application.properties b/dhfs-parent/objects/src/test/resources/application.properties index 1b0d9d26..41617308 100644 --- a/dhfs-parent/objects/src/test/resources/application.properties +++ b/dhfs-parent/objects/src/test/resources/application.properties @@ -1 +1,3 @@ -dhfs.objects.persistence=memory \ No newline at end of file +dhfs.objects.persistence=memory +quarkus.log.category."com.usatiuk".level=TRACE +quarkus.log.category."com.usatiuk".min-level=TRACE From 5af1d8a712bd8c522f24611f563a0a45005a5ea5 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 2 Dec 2024 23:22:30 +0100 Subject: [PATCH 006/105] some moving around --- dhfs-parent/objects-alloc/deployment/pom.xml | 52 ++++++++++ .../deployment/ObjectsAllocProcessor.java | 14 +++ .../alloc/test/ObjectsAllocDevModeTest.java | 23 +++++ .../objects/alloc/test/ObjectsAllocTest.java | 23 +++++ .../objects-alloc/integration-tests/pom.xml | 97 +++++++++++++++++++ .../alloc/it/ObjectsAllocResource.java | 32 ++++++ .../src/main/resources/application.properties | 0 .../alloc/it/ObjectsAllocResourceIT.java | 7 ++ .../alloc/it/ObjectsAllocResourceTest.java | 21 ++++ dhfs-parent/objects-alloc/pom.xml | 22 +++++ dhfs-parent/objects-alloc/runtime/pom.xml | 64 ++++++++++++ .../alloc/runtime}/ObjectAllocator.java | 5 +- .../resources/META-INF/quarkus-extension.yaml | 9 ++ dhfs-parent/objects-common/pom.xml | 20 ++++ .../com/usatiuk/objects/common}/JData.java | 3 +- .../usatiuk/objects/common}/JObjectKey.java | 2 +- dhfs-parent/objects/pom.xml | 16 +++ .../dhfs/objects/CurrentTransaction.java | 2 + .../usatiuk/dhfs/objects/JObjectManager.java | 5 +- .../dhfs/objects/ObjectSerializer.java | 1 + .../com/usatiuk/dhfs/objects/TxBundle.java | 2 + .../FileObjectPersistentStore.java | 3 +- .../MemoryObjectPersistentStore.java | 3 +- .../persistence/ObjectPersistentStore.java | 3 +- .../dhfs/objects/persistence/TxManifest.java | 2 +- .../dhfs/objects/transaction/Transaction.java | 4 +- .../transaction/TransactionFactoryImpl.java | 6 +- .../transaction/TransactionObjectSource.java | 4 +- .../dhfs/objects/transaction/TxRecord.java | 8 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 2 + .../objects/allocator/ChangeTrackerBase.java | 4 +- .../dhfs/objects/allocator/KidDataCT.java | 2 +- .../dhfs/objects/allocator/KidDataNormal.java | 2 +- .../dhfs/objects/allocator/ParentDataCT.java | 2 +- .../objects/allocator/ParentDataNormal.java | 2 +- .../dhfs/objects/allocator/TestData.java | 4 +- .../allocator/TestObjectAllocator.java | 6 +- .../com/usatiuk/dhfs/objects/data/Kid.java | 2 +- 
.../com/usatiuk/dhfs/objects/data/Parent.java | 5 +- .../serializer/TestJDataSerializer.java | 2 +- dhfs-parent/pom.xml | 2 + 41 files changed, 451 insertions(+), 37 deletions(-) create mode 100644 dhfs-parent/objects-alloc/deployment/pom.xml create mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java create mode 100644 dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java create mode 100644 dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java create mode 100644 dhfs-parent/objects-alloc/integration-tests/pom.xml create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java create mode 100644 dhfs-parent/objects-alloc/pom.xml create mode 100644 dhfs-parent/objects-alloc/runtime/pom.xml rename dhfs-parent/{objects/src/main/java/com/usatiuk/dhfs/objects => objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime}/ObjectAllocator.java (76%) create mode 100644 dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml create mode 100644 dhfs-parent/objects-common/pom.xml rename dhfs-parent/{objects/src/main/java/com/usatiuk/dhfs/objects => objects-common/src/main/java/com/usatiuk/objects/common}/JData.java (75%) rename dhfs-parent/{objects/src/main/java/com/usatiuk/dhfs/objects => objects-common/src/main/java/com/usatiuk/objects/common}/JObjectKey.java (72%) diff --git a/dhfs-parent/objects-alloc/deployment/pom.xml b/dhfs-parent/objects-alloc/deployment/pom.xml new file mode 100644 index 00000000..0c4dfa0d --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/pom.xml @@ -0,0 +1,52 @@ + + + 4.0.0 + + + com.usatiuk + objects-alloc-parent + 1.0-SNAPSHOT + + objects-alloc-deployment + DHFS objects allocation - Deployment + + + + io.quarkus + quarkus-arc-deployment + + + com.usatiuk + objects-alloc + ${project.version} + + + io.quarkus + quarkus-junit5-internal + test + + + + + + + maven-compiler-plugin + + + default-compile + + + + io.quarkus + quarkus-extension-processor + ${quarkus.platform.version} + + + + + + + + + diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java new file mode 100644 index 00000000..22db6d10 --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java @@ -0,0 +1,14 @@ +package com.usatiuk.objects.alloc.deployment; + +import io.quarkus.deployment.annotations.BuildStep; +import io.quarkus.deployment.builditem.FeatureBuildItem; + +class ObjectsAllocProcessor { + + private static final String FEATURE = "objects-alloc"; + + @BuildStep + FeatureBuildItem feature() { + return new FeatureBuildItem(FEATURE); + } +} diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java 
b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java new file mode 100644 index 00000000..526a143c --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java @@ -0,0 +1,23 @@ +package com.usatiuk.objects.alloc.test; + +import org.jboss.shrinkwrap.api.ShrinkWrap; +import org.jboss.shrinkwrap.api.spec.JavaArchive; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import io.quarkus.test.QuarkusDevModeTest; + +public class ObjectsAllocDevModeTest { + + // Start hot reload (DevMode) test with your extension loaded + @RegisterExtension + static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest() + .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); + + @Test + public void writeYourOwnDevModeTest() { + // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information + Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName()); + } +} diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java new file mode 100644 index 00000000..14d144c9 --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java @@ -0,0 +1,23 @@ +package com.usatiuk.objects.alloc.test; + +import org.jboss.shrinkwrap.api.ShrinkWrap; +import org.jboss.shrinkwrap.api.spec.JavaArchive; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import io.quarkus.test.QuarkusUnitTest; + +public class ObjectsAllocTest { + + // Start unit test with your extension loaded + @RegisterExtension + static final QuarkusUnitTest unitTest = new QuarkusUnitTest() + .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); + + @Test + public void writeYourOwnUnitTest() { + // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information + Assertions.assertTrue(true, "Add some assertions to " + getClass().getName()); + } +} diff --git a/dhfs-parent/objects-alloc/integration-tests/pom.xml b/dhfs-parent/objects-alloc/integration-tests/pom.xml new file mode 100644 index 00000000..d68f96c5 --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/pom.xml @@ -0,0 +1,97 @@ + + + 4.0.0 + + + com.usatiuk + objects-alloc-parent + 1.0.0-SNAPSHOT + + objects-alloc-integration-tests + DHFS objects allocation - Integration Tests + + + true + + + + + io.quarkus + quarkus-rest + + + com.usatiuk + objects-alloc + ${project.version} + + + io.quarkus + quarkus-junit5 + test + + + io.rest-assured + rest-assured + test + + + + + + + io.quarkus + quarkus-maven-plugin + + + + build + + + + + + maven-failsafe-plugin + + + + integration-test + verify + + + + + + ${project.build.directory}/${project.build.finalName}-runner + org.jboss.logmanager.LogManager + ${maven.home} + + + + + + + + + native-image + + + native + + + + + + maven-surefire-plugin + + ${native.surefire.skip} + + + + + + false + true + + + + diff --git 
a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java new file mode 100644 index 00000000..9a3c5942 --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java @@ -0,0 +1,32 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package com.usatiuk.objects.alloc.it; + +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; + +@Path("/objects-alloc") +@ApplicationScoped +public class ObjectsAllocResource { + // add some rest methods here + + @GET + public String hello() { + return "Hello objects-alloc"; + } +} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties b/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties new file mode 100644 index 00000000..e69de29b diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java new file mode 100644 index 00000000..1625c279 --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java @@ -0,0 +1,7 @@ +package com.usatiuk.objects.alloc.it; + +import io.quarkus.test.junit.QuarkusIntegrationTest; + +@QuarkusIntegrationTest +public class ObjectsAllocResourceIT extends ObjectsAllocResourceTest { +} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java new file mode 100644 index 00000000..90f51cd4 --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java @@ -0,0 +1,21 @@ +package com.usatiuk.objects.alloc.it; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.is; + +import org.junit.jupiter.api.Test; + +import io.quarkus.test.junit.QuarkusTest; + +@QuarkusTest +public class ObjectsAllocResourceTest { + + @Test + public void testHelloEndpoint() { + given() + .when().get("/objects-alloc") + .then() + .statusCode(200) + .body(is("Hello objects-alloc")); + } +} diff --git a/dhfs-parent/objects-alloc/pom.xml b/dhfs-parent/objects-alloc/pom.xml new file mode 100644 index 00000000..37e17d36 --- /dev/null +++ b/dhfs-parent/objects-alloc/pom.xml @@ -0,0 +1,22 @@ + + + 4.0.0 + + + com.usatiuk.dhfs + 
parent + 1.0-SNAPSHOT + + + com.usatiuk + objects-alloc-parent + 1.0-SNAPSHOT + pom + DHFS objects allocation - Parent + + + deployment + runtime + + + diff --git a/dhfs-parent/objects-alloc/runtime/pom.xml b/dhfs-parent/objects-alloc/runtime/pom.xml new file mode 100644 index 00000000..c7fc160b --- /dev/null +++ b/dhfs-parent/objects-alloc/runtime/pom.xml @@ -0,0 +1,64 @@ + + + 4.0.0 + + + com.usatiuk + objects-alloc-parent + 1.0-SNAPSHOT + + objects-alloc + DHFS objects allocation - Runtime + + + + io.quarkus + quarkus-arc + + + com.usatiuk.dhfs + objects-common + 1.0-SNAPSHOT + + + + + + + io.quarkus + quarkus-extension-maven-plugin + ${quarkus.platform.version} + + + compile + + extension-descriptor + + + ${project.groupId}:${project.artifactId}-deployment:${project.version} + + + + + + + maven-compiler-plugin + + + default-compile + + + + io.quarkus + quarkus-extension-processor + ${quarkus.platform.version} + + + + + + + + + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java similarity index 76% rename from dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java rename to dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java index abc04ca3..f328d708 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectAllocator.java +++ b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java @@ -1,4 +1,7 @@ -package com.usatiuk.dhfs.objects; +package com.usatiuk.objects.alloc.runtime; + +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; public interface ObjectAllocator { T create(Class type, JObjectKey key); diff --git a/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml b/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml new file mode 100644 index 00000000..202ecef3 --- /dev/null +++ b/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml @@ -0,0 +1,9 @@ +name: DHFS objects allocation +#description: Do something useful. +metadata: +# keywords: +# - objects-alloc +# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension +# categories: +# - "miscellaneous" +# status: "preview" diff --git a/dhfs-parent/objects-common/pom.xml b/dhfs-parent/objects-common/pom.xml new file mode 100644 index 00000000..d13878fc --- /dev/null +++ b/dhfs-parent/objects-common/pom.xml @@ -0,0 +1,20 @@ + + + 4.0.0 + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + objects-common + + + 21 + 21 + UTF-8 + + + \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java b/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java similarity index 75% rename from dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java rename to dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java index f032b27b..5109336b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java +++ b/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java @@ -1,5 +1,6 @@ -package com.usatiuk.dhfs.objects; +package com.usatiuk.objects.common; +// TODO: This could be maybe moved to a separate module? 
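// Note: these interfaces are not meant to be implemented by hand; the
// objects-alloc Quarkus extension added later in this series is intended to
// generate the backing classes at build time (its processor pairs each getter
// with its setter to discover the fields).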
// The base class for JObject data // Only one instance of this exists per key, the instance in the manager is canonical // When committing a transaction, the instance is checked against it, if it isn't the same, a race occurred. diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java similarity index 72% rename from dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java rename to dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java index 8cc6c978..74ca3fb2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java @@ -1,4 +1,4 @@ -package com.usatiuk.dhfs.objects; +package com.usatiuk.objects.common; import java.io.Serializable; diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 49970b92..0e8ad359 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -64,6 +64,22 @@ supportlib 1.0-SNAPSHOT + + com.usatiuk + objects-alloc + 1.0-SNAPSHOT + + + com.usatiuk + objects-alloc-deployment + 1.0-SNAPSHOT + provided + + + com.usatiuk.dhfs + objects-common + 1.0-SNAPSHOT + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index fe309206..3efba2c7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -2,6 +2,8 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 2ecb5a99..16b2a0e8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -8,6 +8,9 @@ import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; import com.usatiuk.dhfs.objects.transaction.TxRecord; import com.usatiuk.dhfs.utils.DataLocker; import com.usatiuk.dhfs.utils.VoidFn; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -21,7 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -// Manages all access to JData objects. +// Manages all access to com.usatiuk.objects.common.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. 
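// Commit, as implemented in this patch, locks every object the transaction
// touched, verifies that no object was swapped or written by a newer transaction
// (the lastWriteTx check), replaces the canonical wrapper for written objects,
// and only then flushes the modified copies to the persistent store.
//
// Hypothetical usage sketch, mirroring ObjectsTest in this module:
//   txm.begin();
//   var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"),
//           LockingStrategy.OPTIMISTIC).orElseThrow();
//   parent.setLastName("John");
//   txm.commit(); // fails if a concurrent transaction won the race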
// All data goes through it, it is responsible for transaction atomicity @ApplicationScoped diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java index 103d0611..34973a62 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.google.protobuf.ByteString; +import com.usatiuk.objects.common.JData; public interface ObjectSerializer { ByteString serialize(T obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java index e64617a1..1ef7419b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -1,5 +1,7 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.objects.common.JData; + public interface TxBundle { long getId(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index ed3cbef7..c84bdd31 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -2,13 +2,12 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.dhfs.utils.ByteUtils; import com.usatiuk.dhfs.utils.SerializationHelper; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import io.grpc.Status; -import io.quarkus.arc.lookup.LookupIfProperty; import io.quarkus.arc.properties.IfBuildProperty; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index 41af2572..42fe08ec 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -1,8 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.JObjectKey; -import io.quarkus.arc.lookup.LookupIfProperty; +import com.usatiuk.objects.common.JObjectKey; import io.quarkus.arc.properties.IfBuildProperty; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index 79750a69..01b61d2e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ 
-1,8 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; import javax.annotation.Nonnull; import java.util.Collection; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java index 3a91f71e..34190132 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; import java.io.Serializable; import java.util.List; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index f9220a7d..fbcd3c0a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; import java.util.Optional; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 5de8255a..563e84b2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.ObjectAllocator; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import lombok.AccessLevel; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 2526bcd4..5db79bb3 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; import java.util.Optional; import java.util.concurrent.locks.ReadWriteLock; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 0fc6835a..900d67e9 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.ObjectAllocator; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; public class TxRecord { public interface TxObjectRecord { @@ -50,7 +50,7 @@ public class TxRecord { } public record TxObjectRecordCopyLock(TransactionObjectSource.TransactionObject original, - ObjectAllocator.ChangeTrackingJData copy) + ObjectAllocator.ChangeTrackingJData copy) implements TxObjectRecordWrite { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 777e3045..ec7bf92b 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -2,6 +2,8 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java index 1f0a302a..3a46d1d7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.ObjectAllocator; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.JData; import lombok.Getter; import java.io.Serializable; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java index adf74a49..9c962e30 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; import com.usatiuk.dhfs.objects.data.Kid; import lombok.Getter; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java index 600ec787..145e41de 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; 
import com.usatiuk.dhfs.objects.data.Kid; import lombok.Getter; import lombok.Setter; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java index 106f61c7..a0920494 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JObjectKey; import com.usatiuk.dhfs.objects.data.Parent; import lombok.Getter; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java index 77943cf6..a40256ac 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.objects.common.JObjectKey; import lombok.Getter; import lombok.Setter; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java index e34db09d..a35da6ae 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; public abstract class TestData implements JData { private boolean _changed = false; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java index d18c1a83..99713b5c 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java @@ -1,10 +1,10 @@ package com.usatiuk.dhfs.objects.allocator; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.ObjectAllocator; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.dhfs.objects.data.Kid; import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.objects.common.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; @ApplicationScoped diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java index 9c3df9db..acb9a1cc 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.objects.common.JData; public interface Kid extends JData { 
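// (note) Kept as a plain getter interface: the hand-written test allocator in
// this module materializes it for now, and the generated allocator from the
// objects-alloc extension is expected to take over that job.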
String getName(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java index 1067ea5d..cc094077 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -1,12 +1,13 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; public interface Parent extends JData { JObjectKey getName(); String getLastName(); + void setLastName(String lastName); JObjectKey getKidKey(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java index 388ffccd..56cbe6f5 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java @@ -2,9 +2,9 @@ package com.usatiuk.dhfs.objects.serializer; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.ObjectSerializer; import com.usatiuk.dhfs.utils.SerializationHelper; +import com.usatiuk.objects.common.JData; import jakarta.enterprise.context.ApplicationScoped; import java.io.Serializable; diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 3140d94e..a54dda6e 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -17,6 +17,8 @@ autoprotomap objects utils + objects-alloc + objects-common From 3370df6d2cd45d6e40159d6f1f35cef1ce15e80a Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 3 Dec 2024 00:43:27 +0100 Subject: [PATCH 007/105] object alloc dump --- dhfs-parent/objects-alloc/deployment/pom.xml | 8 + .../alloc/deployment/JDataFieldInfo.java | 6 + .../alloc/deployment/JDataIndexBuildItem.java | 12 ++ .../objects/alloc/deployment/JDataInfo.java | 8 + .../deployment/ObjectsAllocProcessor.java | 137 +++++++++++++++++- .../objects/alloc/test/ObjectsAllocTest.java | 7 +- .../objects-alloc/integration-tests/pom.xml | 9 +- .../src/main/resources/application.properties | 1 + .../alloc/it/ObjectsAllocResourceIT.java | 7 - .../alloc/it/ObjectsAllocResourceTest.java | 22 +-- .../objects/alloc/it/TestJDataAssorted.java | 18 +++ .../objects/alloc/it/TestJDataEmpty.java | 6 + dhfs-parent/objects-alloc/pom.xml | 1 + .../alloc/runtime/ChangeTrackerBase.java | 17 +++ .../src/main/resources/application.properties | 2 + 15 files changed, 235 insertions(+), 26 deletions(-) create mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java create mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java create mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java create mode 
100644 dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java create mode 100644 dhfs-parent/objects/src/main/resources/application.properties diff --git a/dhfs-parent/objects-alloc/deployment/pom.xml b/dhfs-parent/objects-alloc/deployment/pom.xml index 0c4dfa0d..e44655f5 100644 --- a/dhfs-parent/objects-alloc/deployment/pom.xml +++ b/dhfs-parent/objects-alloc/deployment/pom.xml @@ -26,6 +26,14 @@ quarkus-junit5-internal test + + org.apache.commons + commons-collections4 + + + org.apache.commons + commons-lang3 + diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java new file mode 100644 index 00000000..bdc8f3dc --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java @@ -0,0 +1,6 @@ +package com.usatiuk.objects.alloc.deployment; + +import org.jboss.jandex.Type; + +public record JDataFieldInfo(String name, Type type) { +} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java new file mode 100644 index 00000000..a94e26fb --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java @@ -0,0 +1,12 @@ +package com.usatiuk.objects.alloc.deployment; + +import io.quarkus.builder.item.MultiBuildItem; +import org.jboss.jandex.ClassInfo; + +public final class JDataIndexBuildItem extends MultiBuildItem { + public final ClassInfo jData; + + public JDataIndexBuildItem(ClassInfo jData) { + this.jData = jData; + } +} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java new file mode 100644 index 00000000..c46b9de2 --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java @@ -0,0 +1,8 @@ +package com.usatiuk.objects.alloc.deployment; + +import org.jboss.jandex.ClassInfo; + +import java.util.Map; + +public record JDataInfo(ClassInfo klass, Map fields) { +} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java index 22db6d10..baffc03e 100644 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java @@ -1,14 +1,143 @@ package com.usatiuk.objects.alloc.deployment; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; +import io.quarkus.arc.deployment.GeneratedBeanBuildItem; +import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor; +import io.quarkus.deployment.annotations.BuildProducer; import io.quarkus.deployment.annotations.BuildStep; -import io.quarkus.deployment.builditem.FeatureBuildItem; +import io.quarkus.deployment.builditem.ApplicationIndexBuildItem; +import 
io.quarkus.deployment.builditem.GeneratedClassBuildItem; +import io.quarkus.gizmo.*; +import jakarta.inject.Singleton; +import org.jboss.jandex.ClassInfo; +import org.jboss.jandex.MethodInfo; +import org.jboss.jandex.Type; + +import java.util.Collections; +import java.util.List; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.stream.Collectors; class ObjectsAllocProcessor { + @BuildStep + void collectJDatas(BuildProducer producer, ApplicationIndexBuildItem jandex) { + var jdatas = jandex.getIndex().getAllKnownSubinterfaces(JData.class); - private static final String FEATURE = "objects-alloc"; + // Collect the leaves + for (var jdata : jdatas) { + System.out.println("Found JData: " + jdata.name()); + if (jandex.getIndex().getAllKnownSubinterfaces(jdata.name()).isEmpty()) { + System.out.println("Found JData leaf: " + jdata.name()); + producer.produce(new JDataIndexBuildItem(jdata)); + } + } + } + + + JDataInfo collectData(JDataIndexBuildItem item) { + var methodNameToInfo = item.jData.methods().stream() + .collect(Collectors.toUnmodifiableMap(MethodInfo::name, x -> x)); + + var reducableSet = new TreeSet<>(methodNameToInfo.keySet()); + + var fields = new TreeMap(); + + // Find pairs of getters and setters + // FIXME: + while (!reducableSet.isEmpty()) { + var name = reducableSet.first(); + reducableSet.remove(name); + if (name.startsWith("get")) { + var setterName = "set" + name.substring(3); + if (reducableSet.contains(setterName)) { + reducableSet.remove(setterName); + } else { + throw new RuntimeException("Missing setter for getter: " + name); + } + + var getter = methodNameToInfo.get(name); + var setter = methodNameToInfo.get(setterName); + + if (!getter.returnType().equals(setter.parameters().getFirst().type())) { + throw new RuntimeException("Getter and setter types do not match: " + name); + } + + var variableName = name.substring(3, 4).toLowerCase() + name.substring(4); + + fields.put(variableName, new JDataFieldInfo(variableName, getter.returnType())); + } else { + throw new RuntimeException("Unknown method name: " + name); + } + } + + return new JDataInfo(item.jData, Collections.unmodifiableMap(fields)); + } + + interface TypeFunction { + void apply(Type type); + } + + void matchClass(BytecodeCreator bytecodeCreator, ResultHandle value, List types, TypeFunction fn) { +// bytecodeCreator.insta + } + + interface ClassTagFunction { + void apply(ClassInfo type, BytecodeCreator branch); + } + + // Returns false branch + BytecodeCreator matchClassTag(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List types, ClassTagFunction fn) { + if (types.isEmpty()) { + return bytecodeCreator; + } + + var eq = bytecodeCreator.invokeVirtualMethod( + MethodDescriptor.ofMethod(Object.class, "equals", boolean.class, Object.class), + toMatch, + bytecodeCreator.loadClass(types.getFirst()) + ); + + var cmp = bytecodeCreator.ifTrue(eq); + fn.apply(types.getFirst(), cmp.trueBranch()); + return matchClassTag(cmp.falseBranch(), toMatch, types.subList(1, types.size()), fn); + } @BuildStep - FeatureBuildItem feature() { - return new FeatureBuildItem(FEATURE); + void makeJDataThingy(List jDataItems, + BuildProducer generatedBeans, + BuildProducer generatedClasses) { + + var data = jDataItems.stream().map(this::collectData).collect(Collectors.toUnmodifiableMap(JDataInfo::klass, x -> x)); + var classes = data.keySet().stream().map(ClassInfo::asClass).toList(); + + var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedBeans); + + try (ClassCreator classCreator = 
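// (note) Gizmo emits this as a synthetic @Singleton bean, so CDI can inject
// the generated ObjectAllocator implementation at runtime.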
ClassCreator.builder() + .className("com.usatiuk.objects.alloc.generated.ObjectAllocatorImpl") + .interfaces(ObjectAllocator.class) + .classOutput(gizmoAdapter) + .build()) { + + classCreator.addAnnotation(Singleton.class); + + try (MethodCreator methodCreator = classCreator.getMethodCreator("create", JData.class, Class.class, JObjectKey.class)) { + matchClassTag(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch) -> { + branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(type.toString(), JObjectKey.class), branch.getMethodParam(1))); + }); + methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); + } + + try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ObjectAllocator.ChangeTrackingJData.class, JData.class)) { + methodCreator.returnValue(methodCreator.loadNull()); + } + + try (MethodCreator methodCreator = classCreator.getMethodCreator("unmodifiable", JData.class, JData.class)) { + methodCreator.returnValue(methodCreator.loadNull()); + } + } + } } diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java index 14d144c9..637d504f 100644 --- a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java +++ b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java @@ -1,19 +1,20 @@ package com.usatiuk.objects.alloc.test; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import io.quarkus.test.QuarkusUnitTest; +import jakarta.inject.Inject; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; -import io.quarkus.test.QuarkusUnitTest; - public class ObjectsAllocTest { // Start unit test with your extension loaded @RegisterExtension static final QuarkusUnitTest unitTest = new QuarkusUnitTest() - .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); + .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); @Test public void writeYourOwnUnitTest() { diff --git a/dhfs-parent/objects-alloc/integration-tests/pom.xml b/dhfs-parent/objects-alloc/integration-tests/pom.xml index d68f96c5..04f38047 100644 --- a/dhfs-parent/objects-alloc/integration-tests/pom.xml +++ b/dhfs-parent/objects-alloc/integration-tests/pom.xml @@ -5,7 +5,7 @@ com.usatiuk objects-alloc-parent - 1.0.0-SNAPSHOT + 1.0-SNAPSHOT objects-alloc-integration-tests DHFS objects allocation - Integration Tests @@ -24,6 +24,11 @@ objects-alloc ${project.version} + + com.usatiuk + objects-alloc-deployment + ${project.version} + io.quarkus quarkus-junit5 @@ -45,6 +50,8 @@ build + generate-code + generate-code-tests diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties b/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties index e69de29b..b1645fe9 100644 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties +++ b/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties @@ -0,0 +1 @@ +quarkus.package.jar.decompiler.enabled=true \ No newline at end of file diff --git 
a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java deleted file mode 100644 index 1625c279..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceIT.java +++ /dev/null @@ -1,7 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import io.quarkus.test.junit.QuarkusIntegrationTest; - -@QuarkusIntegrationTest -public class ObjectsAllocResourceIT extends ObjectsAllocResourceTest { -} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java index 90f51cd4..dbcec91c 100644 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java @@ -1,21 +1,21 @@ package com.usatiuk.objects.alloc.it; -import static io.restassured.RestAssured.given; -import static org.hamcrest.Matchers.is; - -import org.junit.jupiter.api.Test; - +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.JObjectKey; import io.quarkus.test.junit.QuarkusTest; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; @QuarkusTest public class ObjectsAllocResourceTest { + @Inject + ObjectAllocator objectAllocator; @Test - public void testHelloEndpoint() { - given() - .when().get("/objects-alloc") - .then() - .statusCode(200) - .body(is("Hello objects-alloc")); + void testCreateObject() { + var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); } } diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java new file mode 100644 index 00000000..cef5e96e --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java @@ -0,0 +1,18 @@ +package com.usatiuk.objects.alloc.it; + +import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.JObjectKey; + +interface TestJDataAssorted extends JData { + String getLastName(); + + void setLastName(String lastName); + + long getAge(); + + void setAge(long age); + + JObjectKey getKidKey(); + + void setKidKey(JObjectKey kid); +} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java new file mode 100644 index 00000000..62de3b9c --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java @@ -0,0 +1,6 @@ +package com.usatiuk.objects.alloc.it; + +import com.usatiuk.objects.common.JData; + +public interface TestJDataEmpty extends JData { +} diff --git a/dhfs-parent/objects-alloc/pom.xml b/dhfs-parent/objects-alloc/pom.xml index 37e17d36..a6dc399f 100644 --- 
a/dhfs-parent/objects-alloc/pom.xml +++ b/dhfs-parent/objects-alloc/pom.xml @@ -17,6 +17,7 @@ deployment runtime + integration-tests diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java new file mode 100644 index 00000000..9b2c2c76 --- /dev/null +++ b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java @@ -0,0 +1,17 @@ +package com.usatiuk.objects.alloc.runtime; + +import com.usatiuk.objects.common.JData; + +import java.io.Serializable; + +abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData, Serializable { + private transient boolean _modified = false; + + public boolean isModified() { + return _modified; + } + + protected void onChange() { + _modified = true; + } +} diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties new file mode 100644 index 00000000..a9c2019e --- /dev/null +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -0,0 +1,2 @@ +dhfs.objects.persistence=files +quarkus.package.jar.decompiler.enabled=true From c9c5306e8250985675fed1fca0450f84e0703291 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 3 Dec 2024 23:28:36 +0100 Subject: [PATCH 008/105] seemingly working object allocator generator --- .../autoprotomap/integration-tests/pom.xml | 9 - .../autoprotomap/it/AutoprotomapResource.java | 32 -- dhfs-parent/objects-alloc/deployment/pom.xml | 5 + .../alloc/deployment/JDataFieldInfo.java | 4 +- .../objects/alloc/deployment/JDataInfo.java | 8 - .../alloc/deployment/JDataInfoBuildItem.java | 16 + .../deployment/ObjectsAllocProcessor.java | 377 ++++++++++++++---- .../objects-alloc/integration-tests/pom.xml | 25 +- .../alloc/it/ObjectsAllocResource.java | 32 -- .../objects/alloc/it/TestJDataAssorted.java | 6 +- .../objects/alloc/it/TestJDataEmpty.java | 2 +- .../objects/alloc/it/ObjectAllocIT.java | 62 +++ .../alloc/it/ObjectsAllocResourceTest.java | 21 - dhfs-parent/objects-alloc/runtime/pom.xml | 2 +- .../alloc/runtime/ChangeTrackerBase.java | 17 - .../alloc/runtime/ObjectAllocator.java | 4 +- dhfs-parent/objects-common/deployment/pom.xml | 51 +++ .../deployment/ObjectsCommonProcessor.java | 14 + .../common/test/ObjectsCommonDevModeTest.java | 23 ++ .../common/test/ObjectsCommonTest.java | 23 ++ .../objects-common/integration-tests/pom.xml | 93 +++++ .../src/main/resources/application.properties | 1 + dhfs-parent/objects-common/pom.xml | 23 +- dhfs-parent/objects-common/runtime/pom.xml | 59 +++ .../objects/common/runtime}/JData.java | 2 +- .../objects/common/runtime}/JObjectKey.java | 2 +- .../resources/META-INF/quarkus-extension.yaml | 9 + dhfs-parent/objects/pom.xml | 7 +- .../dhfs/objects/CurrentTransaction.java | 4 +- .../usatiuk/dhfs/objects/JObjectManager.java | 6 +- .../dhfs/objects/JavaDataSerializer.java} | 7 +- .../dhfs/objects/ObjectSerializer.java | 2 +- .../com/usatiuk/dhfs/objects/TxBundle.java | 2 +- .../FileObjectPersistentStore.java | 2 +- .../MemoryObjectPersistentStore.java | 2 +- .../persistence/ObjectPersistentStore.java | 2 +- .../dhfs/objects/persistence/TxManifest.java | 2 +- .../dhfs/objects/transaction/Transaction.java | 4 +- .../transaction/TransactionFactoryImpl.java | 4 +- .../transaction/TransactionObjectSource.java | 4 +- .../dhfs/objects/transaction/TxRecord.java | 4 +- 
.../com/usatiuk/dhfs/objects/ObjectsTest.java | 5 +- .../objects/allocator/ChangeTrackerBase.java | 16 - .../dhfs/objects/allocator/KidDataCT.java | 33 -- .../dhfs/objects/allocator/KidDataNormal.java | 23 -- .../dhfs/objects/allocator/ParentDataCT.java | 40 -- .../objects/allocator/ParentDataNormal.java | 29 -- .../dhfs/objects/allocator/TestData.java | 34 -- .../allocator/TestObjectAllocator.java | 40 -- .../com/usatiuk/dhfs/objects/data/Kid.java | 2 +- .../com/usatiuk/dhfs/objects/data/Parent.java | 6 +- 51 files changed, 733 insertions(+), 469 deletions(-) delete mode 100644 dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java create mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java rename dhfs-parent/objects-alloc/integration-tests/src/{test => main}/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java (59%) rename dhfs-parent/objects-alloc/integration-tests/src/{test => main}/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java (64%) create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java delete mode 100644 dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java create mode 100644 dhfs-parent/objects-common/deployment/pom.xml create mode 100644 dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java create mode 100644 dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java create mode 100644 dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java create mode 100644 dhfs-parent/objects-common/integration-tests/pom.xml create mode 100644 dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties create mode 100644 dhfs-parent/objects-common/runtime/pom.xml rename dhfs-parent/objects-common/{src/main/java/com/usatiuk/objects/common => runtime/src/main/java/com/usatiuk/objects/common/runtime}/JData.java (88%) rename dhfs-parent/objects-common/{src/main/java/com/usatiuk/objects/common => runtime/src/main/java/com/usatiuk/objects/common/runtime}/JObjectKey.java (68%) create mode 100644 dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml rename dhfs-parent/objects/src/{test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java => main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java} (69%) delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java delete mode 100644 
dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java delete mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java diff --git a/dhfs-parent/autoprotomap/integration-tests/pom.xml b/dhfs-parent/autoprotomap/integration-tests/pom.xml index 88e789ca..1af18935 100644 --- a/dhfs-parent/autoprotomap/integration-tests/pom.xml +++ b/dhfs-parent/autoprotomap/integration-tests/pom.xml @@ -22,10 +22,6 @@ lombok provided - - io.quarkus - quarkus-resteasy-reactive - com.usatiuk autoprotomap @@ -41,11 +37,6 @@ quarkus-junit5 test - - io.rest-assured - rest-assured - test - io.quarkus quarkus-grpc diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java deleted file mode 100644 index a56a2f81..00000000 --- a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/AutoprotomapResource.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.usatiuk.autoprotomap.it; - -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.ws.rs.GET; -import jakarta.ws.rs.Path; - -@Path("/autoprotomap") -@ApplicationScoped -public class AutoprotomapResource { - // add some rest methods here - - @GET - public String hello() { - return "Hello autoprotomap"; - } -} diff --git a/dhfs-parent/objects-alloc/deployment/pom.xml b/dhfs-parent/objects-alloc/deployment/pom.xml index e44655f5..e2107f92 100644 --- a/dhfs-parent/objects-alloc/deployment/pom.xml +++ b/dhfs-parent/objects-alloc/deployment/pom.xml @@ -21,6 +21,11 @@ objects-alloc ${project.version} + + com.usatiuk + objects-common-deployment + ${project.version} + io.quarkus quarkus-junit5-internal diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java index bdc8f3dc..198a4cfa 100644 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java @@ -1,6 +1,6 @@ package com.usatiuk.objects.alloc.deployment; -import org.jboss.jandex.Type; +import org.jboss.jandex.DotName; -public record JDataFieldInfo(String name, Type type) { +public record JDataFieldInfo(String name, DotName type) { } diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java deleted file mode 100644 index c46b9de2..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfo.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.usatiuk.objects.alloc.deployment; - -import org.jboss.jandex.ClassInfo; - -import java.util.Map; - -public record JDataInfo(ClassInfo klass, Map fields) { -} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java new file mode 100644 index 00000000..3b90a98a --- /dev/null +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java @@ -0,0 +1,16 @@ +package com.usatiuk.objects.alloc.deployment; + +import io.quarkus.builder.item.MultiBuildItem; +import org.jboss.jandex.ClassInfo; + +import java.util.Map; + +public final class JDataInfoBuildItem extends MultiBuildItem { + public final ClassInfo klass; + public final Map fields; + + public JDataInfoBuildItem(ClassInfo klass, Map fields) { + this.klass = klass; + this.fields = fields; + } +} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java index baffc03e..f839e363 100644 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java @@ -1,25 +1,28 @@ package com.usatiuk.objects.alloc.deployment; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import 
com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.arc.deployment.GeneratedBeanBuildItem; import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor; +import io.quarkus.deployment.GeneratedClassGizmoAdaptor; import io.quarkus.deployment.annotations.BuildProducer; import io.quarkus.deployment.annotations.BuildStep; import io.quarkus.deployment.builditem.ApplicationIndexBuildItem; import io.quarkus.deployment.builditem.GeneratedClassBuildItem; import io.quarkus.gizmo.*; import jakarta.inject.Singleton; +import org.apache.commons.lang3.tuple.Pair; import org.jboss.jandex.ClassInfo; +import org.jboss.jandex.DotName; import org.jboss.jandex.MethodInfo; -import org.jboss.jandex.Type; -import java.util.Collections; -import java.util.List; -import java.util.TreeMap; -import java.util.TreeSet; +import java.io.Serializable; +import java.util.*; import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.lang.reflect.Modifier.*; class ObjectsAllocProcessor { @BuildStep @@ -36,81 +39,305 @@ class ObjectsAllocProcessor { } } + private static final String KEY_NAME = "key"; - JDataInfo collectData(JDataIndexBuildItem item) { - var methodNameToInfo = item.jData.methods().stream() - .collect(Collectors.toUnmodifiableMap(MethodInfo::name, x -> x)); - - var reducableSet = new TreeSet<>(methodNameToInfo.keySet()); - - var fields = new TreeMap(); - - // Find pairs of getters and setters - // FIXME: - while (!reducableSet.isEmpty()) { - var name = reducableSet.first(); - reducableSet.remove(name); - if (name.startsWith("get")) { - var setterName = "set" + name.substring(3); - if (reducableSet.contains(setterName)) { - reducableSet.remove(setterName); - } else { - throw new RuntimeException("Missing setter for getter: " + name); - } - - var getter = methodNameToInfo.get(name); - var setter = methodNameToInfo.get(setterName); - - if (!getter.returnType().equals(setter.parameters().getFirst().type())) { - throw new RuntimeException("Getter and setter types do not match: " + name); - } - - var variableName = name.substring(3, 4).toLowerCase() + name.substring(4); - - fields.put(variableName, new JDataFieldInfo(variableName, getter.returnType())); - } else { - throw new RuntimeException("Unknown method name: " + name); - } - } - - return new JDataInfo(item.jData, Collections.unmodifiableMap(fields)); + String propNameToFieldName(String name) { + return name; } - interface TypeFunction { - void apply(Type type); + String propNameToGetterName(String name) { + return "get" + name.substring(0, 1).toUpperCase() + name.substring(1); } - void matchClass(BytecodeCreator bytecodeCreator, ResultHandle value, List types, TypeFunction fn) { -// bytecodeCreator.insta + String propNameToSetterName(String name) { + return "set" + name.substring(0, 1).toUpperCase() + name.substring(1); } - interface ClassTagFunction { - void apply(ClassInfo type, BytecodeCreator branch); + DotName getDataClassName(ClassInfo jData) { + return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() + "Data"); } - // Returns false branch - BytecodeCreator matchClassTag(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List types, ClassTagFunction fn) { - if (types.isEmpty()) { - return bytecodeCreator; - } + DotName getCTClassName(ClassInfo jData) { + return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() 
+ "CTData"); + } - var eq = bytecodeCreator.invokeVirtualMethod( - MethodDescriptor.ofMethod(Object.class, "equals", boolean.class, Object.class), - toMatch, - bytecodeCreator.loadClass(types.getFirst()) - ); - - var cmp = bytecodeCreator.ifTrue(eq); - fn.apply(types.getFirst(), cmp.trueBranch()); - return matchClassTag(cmp.falseBranch(), toMatch, types.subList(1, types.size()), fn); + DotName getImmutableClassName(ClassInfo jData) { + return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() + "ImmutableData"); } @BuildStep - void makeJDataThingy(List jDataItems, - BuildProducer generatedBeans, - BuildProducer generatedClasses) { + void generateDataClass(List jDataItems, BuildProducer generatedClasses) { + var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); + for (var item : jDataItems) { + try (ClassCreator classCreator = ClassCreator.builder() + .className(getDataClassName(item.klass).toString()) + .interfaces(JData.class) + .interfaces(item.klass.name().toString()) + .interfaces(Serializable.class) + .classOutput(gizmoAdapter) + .build()) { - var data = jDataItems.stream().map(this::collectData).collect(Collectors.toUnmodifiableMap(JDataInfo::klass, x -> x)); + + var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { + var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); + + if (jDataFieldInfo.name().equals(KEY_NAME)) { + fc.setModifiers(PRIVATE | FINAL); + } else { + fc.setModifiers(PRIVATE); + } + + try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { + getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); + } + return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); + }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + + for (var field : fieldsMap.values()) { + if (field.getName().equals(KEY_NAME)) { + try (var constructor = classCreator.getConstructorCreator(JObjectKey.class)) { + constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); + constructor.writeInstanceField(field, constructor.getThis(), constructor.getMethodParam(0)); + constructor.returnVoid(); + } + } else { + try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { + setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); + setter.returnVoid(); + } + } + } + } + } + } + + private static final String MODIFIED_FIELD_NAME = "_modified"; + private static final String ON_CHANGE_METHOD_NAME = "onChange"; + + @BuildStep + void generateCTClass(List jDataItems, BuildProducer generatedClasses) { + var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); + for (var item : jDataItems) { + try (ClassCreator classCreator = ClassCreator.builder() + .className(getCTClassName(item.klass).toString()) + .interfaces(JData.class, ObjectAllocator.ChangeTrackingJData.class) + .interfaces(item.klass.name().toString()) + .interfaces(Serializable.class) + .classOutput(gizmoAdapter) + .build()) { + var modified = classCreator.getFieldCreator(MODIFIED_FIELD_NAME, boolean.class); + modified.setModifiers(PRIVATE | TRANSIENT); + + try (var modifiedGetter = classCreator.getMethodCreator("isModified", boolean.class)) { + modifiedGetter.returnValue(modifiedGetter.readInstanceField(modified.getFieldDescriptor(), modifiedGetter.getThis())); + 
} + + try (var onChanged = classCreator.getMethodCreator(ON_CHANGE_METHOD_NAME, void.class)) { + onChanged.writeInstanceField(modified.getFieldDescriptor(), onChanged.getThis(), onChanged.load(true)); + onChanged.returnVoid(); + } + + try (var wrapped = classCreator.getMethodCreator("wrapped", item.klass.name().toString())) { + wrapped.returnValue(wrapped.getThis()); + } + + try (var wrapped = classCreator.getMethodCreator("wrapped", JData.class)) { + wrapped.returnValue(wrapped.getThis()); + } + + var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { + var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); + + if (jDataFieldInfo.name().equals(KEY_NAME)) { + fc.setModifiers(PRIVATE | FINAL); + } else { + fc.setModifiers(PRIVATE); + } + + try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { + getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); + } + return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); + }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + + for (var field : fieldsMap.values()) { + if (!field.getName().equals(KEY_NAME)) { + try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { + setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); + setter.invokeVirtualMethod(MethodDescriptor.ofMethod(classCreator.getClassName(), ON_CHANGE_METHOD_NAME, void.class), setter.getThis()); + setter.returnVoid(); + } + } + } + + try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString())) { + constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); + constructor.writeInstanceField(modified.getFieldDescriptor(), constructor.getThis(), constructor.load(false)); + for (var field : fieldsMap.values()) { + constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod( + MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()), + constructor.getMethodParam(0) + )); + } + constructor.returnVoid(); + } + } + } + + } + + @BuildStep + void generateImmutableClass(List jDataItems, BuildProducer generatedClasses) { + var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); + for (var item : jDataItems) { + try (ClassCreator classCreator = ClassCreator.builder() + .className(getImmutableClassName(item.klass).toString()) + .interfaces(JData.class, ObjectAllocator.ChangeTrackingJData.class) + .interfaces(item.klass.name().toString()) + .interfaces(Serializable.class) + .classOutput(gizmoAdapter) + .build()) { + + var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { + var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); + + if (jDataFieldInfo.name().equals(KEY_NAME)) { + fc.setModifiers(PRIVATE | FINAL); + } else { + fc.setModifiers(PRIVATE); + } + + try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { + getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); + } + return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); + }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + + for (var field : fieldsMap.values()) { + try (var 
setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) {
+                        setter.throwException(UnsupportedOperationException.class, "Immutable object");
+                    }
+                }
+
+                try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString())) {
+                    constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis());
+                    for (var field : fieldsMap.values()) {
+                        constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod(
+                                MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()),
+                                constructor.getMethodParam(0)
+                        ));
+                    }
+                    constructor.returnVoid();
+                }
+            }
+        }
+
+    }
+
+    List<ClassInfo> collectInterfaces(ClassInfo type, ApplicationIndexBuildItem jandex) {
+        return Stream.concat(Stream.of(type), type.interfaceNames().stream()
+                .flatMap(x -> {
+                    var ret = jandex.getIndex().getClassByName(x);
+                    if (ret == null) {
+                        System.out.println("Interface not found! " + x);
+                        return Stream.empty();
+                    }
+                    return Stream.of(ret);
+                })
+                .flatMap(i -> collectInterfaces(i, jandex).stream()))
+                .collect(Collectors.toList());
+    }
+
+    Map<String, MethodInfo> collectMethods(List<ClassInfo> types) {
+        return types.stream()
+                .flatMap(x -> x.methods().stream())
+                .collect(Collectors.toMap(MethodInfo::name, x -> x));
+    }
+
+    @BuildStep
+    void collectData(BuildProducer<JDataInfoBuildItem> producer, List<JDataIndexBuildItem> items, ApplicationIndexBuildItem jandex) {
+        for (var item : items) {
+            var methodNameToInfo = collectMethods(collectInterfaces(item.jData, jandex));
+
+            var reducableSet = new TreeSet<>(methodNameToInfo.keySet());
+
+            var fields = new TreeMap<String, JDataFieldInfo>();
+            if (reducableSet.contains(propNameToGetterName(KEY_NAME))) {
+                reducableSet.remove(propNameToGetterName(KEY_NAME));
+                var methodInfo = methodNameToInfo.get(propNameToGetterName(KEY_NAME));
+                if (!methodInfo.returnType().name().equals(DotName.createSimple(JObjectKey.class.getName()))) {
+                    throw new RuntimeException("Key getter must return JObjectKey");
+                }
+                fields.put(KEY_NAME, new JDataFieldInfo(KEY_NAME, methodNameToInfo.get(propNameToGetterName(KEY_NAME)).returnType().name()));
+            } else {
+//                throw new RuntimeException("Missing key getter");
+                System.out.println("Missing key getter for " + item.jData);
+                // FIXME!: No matter what, I couldn't get JData to get indexed by jandex
+                fields.put(KEY_NAME, new JDataFieldInfo(KEY_NAME, DotName.createSimple(JObjectKey.class)));
+            }
+
+            // Find pairs of getters and setters
+            // FIXME:
+            while (!reducableSet.isEmpty()) {
+                var name = reducableSet.first();
+                reducableSet.remove(name);
+                if (name.startsWith("get")) {
+                    var setterName = "set" + name.substring(3);
+                    if (reducableSet.contains(setterName)) {
+                        reducableSet.remove(setterName);
+                    } else {
+                        throw new RuntimeException("Missing setter for getter: " + name);
+                    }
+
+                    var getter = methodNameToInfo.get(name);
+                    var setter = methodNameToInfo.get(setterName);
+
+                    if (!getter.returnType().equals(setter.parameters().getFirst().type())) {
+                        throw new RuntimeException("Getter and setter types do not match: " + name);
+                    }
+
+                    var variableName = name.substring(3, 4).toLowerCase() + name.substring(4);
+
+                    fields.put(variableName, new JDataFieldInfo(variableName, getter.returnType().name()));
+                } else {
+                    throw new RuntimeException("Unknown method name: " + name);
+                }
+            }
+            producer.produce(new JDataInfoBuildItem(item.jData, Collections.unmodifiableMap(fields)));
+        }
+    }
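// A worked example of the getter/setter convention that collectData() above
// enforces, using TestJDataAssorted from the integration tests in this same
// patch (a sketch of the convention, not generated output):
//
//     public interface TestJDataAssorted extends JData {
//         String getLastName();              // must be paired with...
//         void setLastName(String lastName); // ...a setter of the same type
//     }
//
// The pair above is reduced to a single JDataFieldInfo("lastName", DotName of
// java.lang.String); an unpaired getter or a getter/setter type mismatch
// throws a RuntimeException and fails the build step immediately.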
+
+    // Emits one instanceof check per candidate type; fn fills in the bytecode of each matching branch
+    void matchClass(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List<ClassInfo> types, ClassTagFunction fn) {
+        for (var type : types) {
+            var eq = bytecodeCreator.instanceOf(toMatch, type.name().toString());
+            var cmp = bytecodeCreator.ifTrue(eq);
+            fn.apply(type, cmp.trueBranch(), toMatch);
+        }
+    }
+
+    interface ClassTagFunction {
+        void apply(ClassInfo type, BytecodeCreator branch, ResultHandle value);
+    }
+
+    // Same dispatch, but matching on the Class object itself (for the Class<?> parameter of create())
+    void matchClassTag(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List<ClassInfo> types, ClassTagFunction fn) {
+        for (var type : types) {
+            var eq = bytecodeCreator.invokeVirtualMethod(
+                    MethodDescriptor.ofMethod(Object.class, "equals", boolean.class, Object.class),
+                    toMatch,
+                    bytecodeCreator.loadClass(type.name().toString())
+            );
+
+            var cmp = bytecodeCreator.ifTrue(eq);
+            fn.apply(type, cmp.trueBranch(), toMatch);
+        }
+    }
+
+    @BuildStep
+    void makeJDataThingy(List<JDataInfoBuildItem> jDataItems,
+                         BuildProducer<GeneratedBeanBuildItem> generatedBeans) {
+        var data = jDataItems.stream().collect(Collectors.toUnmodifiableMap(i -> i.klass, x -> x));
         var classes = data.keySet().stream().map(ClassInfo::asClass).toList();
 
         var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedBeans);
@@ -124,18 +351,24 @@ class ObjectsAllocProcessor {
             classCreator.addAnnotation(Singleton.class);
 
             try (MethodCreator methodCreator = classCreator.getMethodCreator("create", JData.class, Class.class, JObjectKey.class)) {
-                matchClassTag(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch) -> {
-                    branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(type.toString(), JObjectKey.class), branch.getMethodParam(1)));
+                matchClassTag(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> {
+                    branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getDataClassName(type).toString(), JObjectKey.class), branch.getMethodParam(1)));
                 });
                 methodCreator.throwException(IllegalArgumentException.class, "Unknown type");
             }
 
             try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ObjectAllocator.ChangeTrackingJData.class, JData.class)) {
-                methodCreator.returnValue(methodCreator.loadNull());
+                matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> {
+                    branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getCTClassName(type).toString(), type.name().toString()), value));
+                });
+                methodCreator.throwException(IllegalArgumentException.class, "Unknown type");
             }
 
             try (MethodCreator methodCreator = classCreator.getMethodCreator("unmodifiable", JData.class, JData.class)) {
-                methodCreator.returnValue(methodCreator.loadNull());
+                matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> {
+                    branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getImmutableClassName(type).toString(), type.name().toString()), value));
+                });
+                methodCreator.throwException(IllegalArgumentException.class, "Unknown type");
             }
         }
     }
diff --git a/dhfs-parent/objects-alloc/integration-tests/pom.xml b/dhfs-parent/objects-alloc/integration-tests/pom.xml
index 04f38047..3bf24678 100644
--- a/dhfs-parent/objects-alloc/integration-tests/pom.xml
+++ b/dhfs-parent/objects-alloc/integration-tests/pom.xml
@@ -1,5 +1,6 @@ - + 4.0.0 @@ -15,10 +16,6 @@ - - io.quarkus - quarkus-rest - com.usatiuk objects-alloc @@ -30,13 +27,18 @@ ${project.version} - io.quarkus - quarkus-junit5 - test + com.usatiuk + objects-common + ${project.version} - io.rest-assured - rest-assured + com.usatiuk + objects-common-deployment + ${project.version} + + + io.quarkus + quarkus-junit5 test @@ -68,7 +70,8 @@ - 
${project.build.directory}/${project.build.finalName}-runner + ${project.build.directory}/${project.build.finalName}-runner + org.jboss.logmanager.LogManager ${maven.home} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java deleted file mode 100644 index 9a3c5942..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/ObjectsAllocResource.java +++ /dev/null @@ -1,32 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ -package com.usatiuk.objects.alloc.it; - -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.ws.rs.GET; -import jakarta.ws.rs.Path; - -@Path("/objects-alloc") -@ApplicationScoped -public class ObjectsAllocResource { - // add some rest methods here - - @GET - public String hello() { - return "Hello objects-alloc"; - } -} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java similarity index 59% rename from dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java rename to dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java index cef5e96e..429f5370 100644 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java +++ b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java @@ -1,9 +1,9 @@ package com.usatiuk.objects.alloc.it; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; -interface TestJDataAssorted extends JData { +public interface TestJDataAssorted extends JData { String getLastName(); void setLastName(String lastName); diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java similarity index 64% rename from dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java rename to dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java index 62de3b9c..cfac67f3 100644 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java +++ 
b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java @@ -1,6 +1,6 @@ package com.usatiuk.objects.alloc.it; -import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.runtime.JData; public interface TestJDataEmpty extends JData { } diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java new file mode 100644 index 00000000..51ea070e --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java @@ -0,0 +1,62 @@ +package com.usatiuk.objects.alloc.it; + +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JObjectKey; +import io.quarkus.test.junit.QuarkusTest; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +@QuarkusTest +public class ObjectAllocIT { + @Inject + ObjectAllocator objectAllocator; + + @Test + void testCreateObject() { + var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); + } + + + @Test + void testCopyObject() { + var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); + newObject.setLastName("1"); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); + + var copyObject = objectAllocator.copy(newObject); + Assertions.assertNotNull(copyObject); + Assertions.assertFalse(copyObject.isModified()); + Assertions.assertEquals("1", copyObject.wrapped().getLastName()); + copyObject.wrapped().setLastName("2"); + Assertions.assertTrue(copyObject.isModified()); + Assertions.assertEquals("2", copyObject.wrapped().getLastName()); + Assertions.assertEquals("1", newObject.getLastName()); + } + + @Test + void testImmutable() { + var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); + newObject.setLastName("1"); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); + + var copyObject = objectAllocator.unmodifiable(newObject); + Assertions.assertNotNull(copyObject); + Assertions.assertEquals("1", copyObject.getLastName()); + Assertions.assertThrows(UnsupportedOperationException.class, () -> copyObject.setLastName("2")); + } + + @Test + void testImmutable2() { + var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmpty")); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataEmpty", newObject.getKey().name()); + + var copyObject = objectAllocator.unmodifiable(newObject); + Assertions.assertNotNull(copyObject); + } +} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java deleted file mode 100644 index dbcec91c..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectsAllocResourceTest.java +++ /dev/null @@ -1,21 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; 
-import com.usatiuk.objects.common.JObjectKey; -import io.quarkus.test.junit.QuarkusTest; -import jakarta.inject.Inject; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -@QuarkusTest -public class ObjectsAllocResourceTest { - @Inject - ObjectAllocator objectAllocator; - - @Test - void testCreateObject() { - var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); - } -} diff --git a/dhfs-parent/objects-alloc/runtime/pom.xml b/dhfs-parent/objects-alloc/runtime/pom.xml index c7fc160b..64dce149 100644 --- a/dhfs-parent/objects-alloc/runtime/pom.xml +++ b/dhfs-parent/objects-alloc/runtime/pom.xml @@ -17,7 +17,7 @@ quarkus-arc - com.usatiuk.dhfs + com.usatiuk objects-common 1.0-SNAPSHOT diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java deleted file mode 100644 index 9b2c2c76..00000000 --- a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackerBase.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.usatiuk.objects.alloc.runtime; - -import com.usatiuk.objects.common.JData; - -import java.io.Serializable; - -abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData, Serializable { - private transient boolean _modified = false; - - public boolean isModified() { - return _modified; - } - - protected void onChange() { - _modified = true; - } -} diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java index f328d708..b4a3361e 100644 --- a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java +++ b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java @@ -1,7 +1,7 @@ package com.usatiuk.objects.alloc.runtime; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; public interface ObjectAllocator { T create(Class type, JObjectKey key); diff --git a/dhfs-parent/objects-common/deployment/pom.xml b/dhfs-parent/objects-common/deployment/pom.xml new file mode 100644 index 00000000..36504616 --- /dev/null +++ b/dhfs-parent/objects-common/deployment/pom.xml @@ -0,0 +1,51 @@ + + + 4.0.0 + + + com.usatiuk + objects-common-parent + 1.0-SNAPSHOT + + objects-common-deployment + DHFS objects common stuff - Deployment + + + + io.quarkus + quarkus-arc-deployment + + + com.usatiuk + objects-common + ${project.version} + + + io.quarkus + quarkus-junit5-internal + test + + + + + + + maven-compiler-plugin + + + default-compile + + + + io.quarkus + quarkus-extension-processor + ${quarkus.platform.version} + + + + + + + + + diff --git a/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java b/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java new file mode 100644 index 00000000..e4316e72 --- /dev/null +++ 
b/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java @@ -0,0 +1,14 @@ +package com.usatiuk.objects.common.deployment; + +import io.quarkus.deployment.annotations.BuildStep; +import io.quarkus.deployment.builditem.FeatureBuildItem; + +class ObjectsCommonProcessor { + + private static final String FEATURE = "objects-common"; + + @BuildStep + FeatureBuildItem feature() { + return new FeatureBuildItem(FEATURE); + } +} diff --git a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java new file mode 100644 index 00000000..78fbe4f7 --- /dev/null +++ b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java @@ -0,0 +1,23 @@ +package com.usatiuk.objects.common.test; + +import org.jboss.shrinkwrap.api.ShrinkWrap; +import org.jboss.shrinkwrap.api.spec.JavaArchive; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import io.quarkus.test.QuarkusDevModeTest; + +public class ObjectsCommonDevModeTest { + + // Start hot reload (DevMode) test with your extension loaded + @RegisterExtension + static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest() + .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); + + @Test + public void writeYourOwnDevModeTest() { + // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information + Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName()); + } +} diff --git a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java new file mode 100644 index 00000000..c74a2c67 --- /dev/null +++ b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java @@ -0,0 +1,23 @@ +package com.usatiuk.objects.common.test; + +import org.jboss.shrinkwrap.api.ShrinkWrap; +import org.jboss.shrinkwrap.api.spec.JavaArchive; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +import io.quarkus.test.QuarkusUnitTest; + +public class ObjectsCommonTest { + + // Start unit test with your extension loaded + @RegisterExtension + static final QuarkusUnitTest unitTest = new QuarkusUnitTest() + .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); + + @Test + public void writeYourOwnUnitTest() { + // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information + Assertions.assertTrue(true, "Add some assertions to " + getClass().getName()); + } +} diff --git a/dhfs-parent/objects-common/integration-tests/pom.xml b/dhfs-parent/objects-common/integration-tests/pom.xml new file mode 100644 index 00000000..80313213 --- /dev/null +++ b/dhfs-parent/objects-common/integration-tests/pom.xml @@ -0,0 +1,93 @@ + + + 4.0.0 + + + com.usatiuk + objects-common-parent + 1.0-SNAPSHOT + + objects-common-integration-tests + DHFS objects common stuff - Integration Tests + + + true + + + + + com.usatiuk + objects-common + 
${project.version} + + + com.usatiuk + objects-common-deployment + ${project.version} + + + io.quarkus + quarkus-junit5 + test + + + + + + + io.quarkus + quarkus-maven-plugin + + + + build + + + + + + maven-failsafe-plugin + + + + integration-test + verify + + + + + + ${project.build.directory}/${project.build.finalName}-runner + org.jboss.logmanager.LogManager + ${maven.home} + + + + + + + + + native-image + + + native + + + + + + maven-surefire-plugin + + ${native.surefire.skip} + + + + + + false + true + + + + diff --git a/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties b/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties new file mode 100644 index 00000000..b1645fe9 --- /dev/null +++ b/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties @@ -0,0 +1 @@ +quarkus.package.jar.decompiler.enabled=true \ No newline at end of file diff --git a/dhfs-parent/objects-common/pom.xml b/dhfs-parent/objects-common/pom.xml index d13878fc..2d462191 100644 --- a/dhfs-parent/objects-common/pom.xml +++ b/dhfs-parent/objects-common/pom.xml @@ -1,20 +1,23 @@ - + 4.0.0 + com.usatiuk.dhfs parent 1.0-SNAPSHOT - objects-common + com.usatiuk + objects-common-parent + 1.0-SNAPSHOT + pom + DHFS objects common stuff - Parent - - 21 - 21 - UTF-8 - + + deployment + runtime + integration-tests + - \ No newline at end of file + diff --git a/dhfs-parent/objects-common/runtime/pom.xml b/dhfs-parent/objects-common/runtime/pom.xml new file mode 100644 index 00000000..d02d31aa --- /dev/null +++ b/dhfs-parent/objects-common/runtime/pom.xml @@ -0,0 +1,59 @@ + + + 4.0.0 + + + com.usatiuk + objects-common-parent + 1.0-SNAPSHOT + + objects-common + DHFS objects common stuff - Runtime + + + + io.quarkus + quarkus-arc + + + + + + + io.quarkus + quarkus-extension-maven-plugin + ${quarkus.platform.version} + + + compile + + extension-descriptor + + + ${project.groupId}:${project.artifactId}-deployment:${project.version} + + + + + + + maven-compiler-plugin + + + default-compile + + + + io.quarkus + quarkus-extension-processor + ${quarkus.platform.version} + + + + + + + + + diff --git a/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java similarity index 88% rename from dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java rename to dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java index 5109336b..5d97c2dd 100644 --- a/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JData.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java @@ -1,4 +1,4 @@ -package com.usatiuk.objects.common; +package com.usatiuk.objects.common.runtime; // TODO: This could be maybe moved to a separate module? 
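// A minimal sketch of the runtime contract defined by these two files, as the
// allocator tests in this patch exercise it (inferred from usage, not copied
// from the sources):
//
//     public interface JData {
//         JObjectKey getKey();
//     }
//
//     public record JObjectKey(String name) implements Serializable {}
//
// Every managed object is thus addressed by a small serializable key value.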
// The base class for JObject data diff --git a/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java similarity index 68% rename from dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java rename to dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java index 74ca3fb2..bce27fdc 100644 --- a/dhfs-parent/objects-common/src/main/java/com/usatiuk/objects/common/JObjectKey.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java @@ -1,4 +1,4 @@ -package com.usatiuk.objects.common; +package com.usatiuk.objects.common.runtime; import java.io.Serializable; diff --git a/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml b/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml new file mode 100644 index 00000000..b05649d2 --- /dev/null +++ b/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml @@ -0,0 +1,9 @@ +name: DHFS objects common stuff +#description: Do something useful. +metadata: +# keywords: +# - objects-common +# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension +# categories: +# - "miscellaneous" +# status: "preview" diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 0e8ad359..96e660a5 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -76,10 +76,15 @@ provided - com.usatiuk.dhfs + com.usatiuk objects-common 1.0-SNAPSHOT + + com.usatiuk + objects-common-deployment + 1.0-SNAPSHOT + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index 3efba2c7..bd8dab04 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -2,8 +2,8 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 16b2a0e8..ec9eece8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -8,8 +8,8 @@ import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; import com.usatiuk.dhfs.objects.transaction.TxRecord; import com.usatiuk.dhfs.utils.DataLocker; import com.usatiuk.dhfs.utils.VoidFn; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; 
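// A sketch of how the generated allocator is meant to be consumed by this
// manager, assuming the ObjectAllocator interface from this patch series
// (generics abbreviated):
//
//     JData committed = ...;                          // last committed state
//     var tracked = objectAllocator.copy(committed);  // ChangeTrackingJData
//     // ... a transaction mutates tracked.wrapped() ...
//     if (tracked.isModified()) {
//         // serialize tracked.wrapped() and queue it for the backing store
//     }
//
// create() backs newly inserted objects, and unmodifiable() wraps read-only
// views whose generated setters throw UnsupportedOperationException.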
@@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -// Manages all access to com.usatiuk.objects.common.JData objects. +// Manages all access to com.usatiuk.objects.common.runtime.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. // All data goes through it, it is responsible for transaction atomicity @ApplicationScoped diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java similarity index 69% rename from dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java index 56cbe6f5..d7ab1597 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/serializer/TestJDataSerializer.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java @@ -1,16 +1,15 @@ -package com.usatiuk.dhfs.objects.serializer; +package com.usatiuk.dhfs.objects; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.ObjectSerializer; import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.runtime.JData; import jakarta.enterprise.context.ApplicationScoped; import java.io.Serializable; @ApplicationScoped -public class TestJDataSerializer implements ObjectSerializer { +public class JavaDataSerializer implements ObjectSerializer { @Override public ByteString serialize(JData obj) { return SerializationHelper.serialize((Serializable) obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java index 34973a62..e5922c67 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.runtime.JData; public interface ObjectSerializer { ByteString serialize(T obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java index 1ef7419b..1cd63774 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.runtime.JData; public interface TxBundle { long getId(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index c84bdd31..4c37c14f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -2,7 +2,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; import 
com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JObjectKey; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.dhfs.utils.ByteUtils; import com.usatiuk.dhfs.utils.SerializationHelper; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index 42fe08ec..4d07aae2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.arc.properties.IfBuildProperty; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index 01b61d2e..afcab147 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JObjectKey; import javax.annotation.Nonnull; import java.util.Collection; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java index 34190132..3244b4cd 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JObjectKey; import java.io.Serializable; import java.util.List; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index fbcd3c0a..72794c7b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import java.util.Optional; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 563e84b2..7ab3a41c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 5db79bb3..7a4dcd22 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import java.util.Optional; import java.util.concurrent.locks.ReadWriteLock; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 900d67e9..abf2e39f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; public class TxRecord { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index ec7bf92b..69278976 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -2,8 +2,9 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; @@ -20,7 +21,7 @@ public class ObjectsTest { TransactionManager txm; @Inject - CurrentTransaction curTx; + Transaction curTx; @Inject ObjectAllocator alloc; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java deleted file mode 100644 index 3a46d1d7..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ChangeTrackerBase.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import com.usatiuk.objects.common.JData; 
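// These hand-written test helpers are superseded by allocator-generated change
// trackers: ObjectAllocator.copy(obj) yields a change-tracking copy whose
// setters flip a modified flag, and commit() only flushes records whose copy
// reports isModified(). A minimal sketch of that contract (it becomes the
// standalone ChangeTrackingJData interface in a later patch in this series):
//
//   public interface ChangeTrackingJData<T extends JData> {
//       T wrapped();          // the mutable view handed out to the transaction
//       boolean isModified(); // true once any tracked setter has run
//   }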
-import lombok.Getter; - -import java.io.Serializable; - -public abstract class ChangeTrackerBase implements ObjectAllocator.ChangeTrackingJData, Serializable { - @Getter - private transient boolean _modified = false; - - protected void onChange() { - _modified = true; - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java deleted file mode 100644 index 9c962e30..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataCT.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.common.JObjectKey; -import com.usatiuk.dhfs.objects.data.Kid; -import lombok.Getter; - -public class KidDataCT extends ChangeTrackerBase implements Kid { - private final JObjectKey _key; - - @Getter - private String _name; - - @Override - public void setName(String name) { - _name = name; - onChange(); - } - - public KidDataCT(Kid normal) { - _key = normal.getKey(); - _name = normal.getName(); - } - - @Override - public JObjectKey getKey() { - return _key; - } - - @Override - public Kid wrapped() { - return this; - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java deleted file mode 100644 index 145e41de..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/KidDataNormal.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.common.JObjectKey; -import com.usatiuk.dhfs.objects.data.Kid; -import lombok.Getter; -import lombok.Setter; - -import java.io.Serializable; - -public class KidDataNormal implements Kid, Serializable { - private final JObjectKey _key; - - @Getter - @Setter - private String _name; - - public KidDataNormal(JObjectKey key) {_key = key;} - - @Override - public JObjectKey getKey() { - return _key; - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java deleted file mode 100644 index a0920494..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataCT.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.common.JObjectKey; -import com.usatiuk.dhfs.objects.data.Parent; -import lombok.Getter; - -public class ParentDataCT extends ChangeTrackerBase implements Parent { - @Getter - private JObjectKey _name; - @Getter - private JObjectKey _kidKey; - @Getter - private String _lastName; - - public void setKidKey(JObjectKey key) { - _kidKey = key; - onChange(); - } - - public void setLastName(String lastName) { - _lastName = lastName; - onChange(); - } - - public ParentDataCT(Parent normal) { - _name = normal.getKey(); - _kidKey = normal.getKidKey(); - _lastName = normal.getLastName(); - } - - @Override - public JObjectKey getKey() { - return _name; - } - - @Override - public Parent wrapped() { - return this; - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java deleted file mode 100644 index a40256ac..00000000 --- 
a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/ParentDataNormal.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.dhfs.objects.data.Parent; -import com.usatiuk.objects.common.JObjectKey; -import lombok.Getter; -import lombok.Setter; - -import java.io.Serializable; - -public class ParentDataNormal implements Parent, Serializable { - @Getter - private JObjectKey _name; - @Getter - @Setter - private JObjectKey _kidKey; - @Getter - @Setter - private String _lastName; - - public ParentDataNormal(JObjectKey name) { - _name = name; - } - - @Override - public JObjectKey getKey() { - return _name; - } - -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java deleted file mode 100644 index a35da6ae..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestData.java +++ /dev/null @@ -1,34 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; - -public abstract class TestData implements JData { - private boolean _changed = false; - private final long _version; - private final JObjectKey _key; - - protected TestData(long version, JObjectKey key) { - _version = version; - _key = key; - } - - void onChanged() { - _changed = true; - } - - public boolean isChanged() { - return _changed; - } - - public long getVersion() { - return _version; - } - - @Override - public JObjectKey getKey() { - return _key; - } - - public abstract TestData copy(); -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java deleted file mode 100644 index 99713b5c..00000000 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/allocator/TestObjectAllocator.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.usatiuk.dhfs.objects.allocator; - -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import com.usatiuk.dhfs.objects.data.Kid; -import com.usatiuk.dhfs.objects.data.Parent; -import com.usatiuk.objects.common.JObjectKey; -import jakarta.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class TestObjectAllocator implements ObjectAllocator { - @Override - public T create(Class type, JObjectKey key) { - if (type == Kid.class) { - return type.cast(new KidDataNormal(key)); - } else if (type == Parent.class) { - return type.cast(new ParentDataNormal(key)); - } else { - throw new IllegalArgumentException("Unknown type: " + type); - } - } - - @Override - public ChangeTrackingJData copy(T obj) { -// if (obj instanceof ChangeTrackerBase) { -// throw new IllegalArgumentException("Cannot copy a ChangeTrackerBase object"); -// } - - return switch (obj) { - case Kid kid -> (ChangeTrackingJData) new KidDataCT(kid); - case Parent parent -> (ChangeTrackingJData) new ParentDataCT(parent); - default -> throw new IllegalStateException("Unexpected value: " + obj); - }; - } - - @Override - public T unmodifiable(T obj) { - return obj; // TODO: - } -} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java index acb9a1cc..a3665e4a 100644 --- 
a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.objects.common.JData; +import com.usatiuk.objects.common.runtime.JData; public interface Kid extends JData { String getName(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java index cc094077..424fdf6f 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -1,11 +1,9 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.objects.common.JData; -import com.usatiuk.objects.common.JObjectKey; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; public interface Parent extends JData { - JObjectKey getName(); - String getLastName(); void setLastName(String lastName); From 00a50152080d4187648957b4053e422969ab0833 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 4 Dec 2024 00:04:45 +0100 Subject: [PATCH 009/105] a bit nicer tx object locking --- .../usatiuk/dhfs/objects/JObjectManager.java | 40 ++++++++++++++++++- .../transaction/TransactionFactoryImpl.java | 38 ++++++------------ .../transaction/TransactionObjectSource.java | 4 ++ 3 files changed, 55 insertions(+), 27 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index ec9eece8..440f606e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -8,9 +8,9 @@ import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; import com.usatiuk.dhfs.objects.transaction.TxRecord; import com.usatiuk.dhfs.utils.DataLocker; import com.usatiuk.dhfs.utils.VoidFn; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -106,6 +106,43 @@ public class JObjectManager { if (got == null) return Optional.empty(); return Optional.of(new TransactionObjectImpl<>(got.getLeft(), got.getRight().lock)); } + + private Optional> getLocked(Class type, JObjectKey key, boolean write) { + var read = get(type, key).orElse(null); + if (read == null) return Optional.empty(); + var lock = write ? read.lock().writeLock() : read.lock().readLock(); + lock.lock(); + while (true) { + try { + var readAgain = get(type, key).orElse(null); + if (readAgain == null) { + lock.unlock(); + return Optional.empty(); + } + if (!Objects.equals(read, readAgain)) { + lock.unlock(); + read = readAgain; + lock = write ? 
read.lock().writeLock() : read.lock().readLock(); + lock.lock(); + continue; + } + return Optional.of(new TransactionObjectImpl<>(read.data(), read.lock())); + } catch (Throwable e) { + lock.unlock(); + throw e; + } + } + } + + @Override + public Optional> getReadLocked(Class type, JObjectKey key) { + return getLocked(type, key, false); + } + + @Override + public Optional> getWriteLocked(Class type, JObjectKey key) { + return getLocked(type, key, true); + } }; public TransactionPrivate createTransaction() { @@ -114,7 +151,6 @@ public class JObjectManager { return transactionFactory.createTransaction(counter, _objSource); } - public void commit(TransactionPrivate tx) { var toUnlock = new LinkedList(); var toFlush = new LinkedList>(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 7ab3a41c..a84741a4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import lombok.AccessLevel; @@ -49,9 +49,12 @@ public class TransactionFactoryImpl implements TransactionFactory { switch (strategy) { case READ_ONLY: { - read.lock().readLock().lock(); - var view = objectAllocator.unmodifiable(read.data()); - _objects.put(key, new TxRecord.TxObjectRecordRead<>(read, view)); + var locked = _source.getReadLocked(type, key).orElse(null); + if (locked == null) { + return Optional.empty(); + } + var view = objectAllocator.unmodifiable(locked.data()); + _objects.put(key, new TxRecord.TxObjectRecordRead<>(locked, view)); return Optional.of(view); } case OPTIMISTIC: { @@ -60,28 +63,13 @@ public class TransactionFactoryImpl implements TransactionFactory { return Optional.of(copy.wrapped()); } case WRITE: { - read.lock().writeLock().lock(); - while (true) { - try { - var readAgain = _source.get(type, key).orElse(null); - if (readAgain == null) { - read.lock().writeLock().unlock(); - return Optional.empty(); - } - if (!Objects.equals(read, readAgain)) { - read.lock().writeLock().unlock(); - read = readAgain; - read.lock().writeLock().lock(); - continue; - } - var copy = objectAllocator.copy(read.data()); - _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(read, copy)); - return Optional.of(copy.wrapped()); - } catch (Throwable e) { - read.lock().writeLock().unlock(); - throw e; - } + var locked = _source.getWriteLocked(type, key).orElse(null); + if (locked == null) { + return Optional.empty(); } + var copy = objectAllocator.copy(locked.data()); + _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); + return Optional.of(copy.wrapped()); } default: throw new IllegalArgumentException("Unknown locking strategy"); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 7a4dcd22..b6cd156e 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -14,4 +14,8 @@ public interface TransactionObjectSource { } Optional> get(Class type, JObjectKey key); + + Optional> getReadLocked(Class type, JObjectKey key); + + Optional> getWriteLocked(Class type, JObjectKey key); } From c242a318f315afcf8e71d58c67243b721ab8bdd5 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 4 Dec 2024 16:57:03 +0100 Subject: [PATCH 010/105] some more lock tidying, serializable thingy --- .../usatiuk/dhfs/objects/JObjectManager.java | 169 ++++++++++++------ .../dhfs/objects/TransactionManager.java | 27 +++ .../dhfs/objects/TransactionManagerImpl.java | 12 ++ .../objects/transaction/LockingStrategy.java | 8 +- .../dhfs/objects/transaction/Transaction.java | 2 +- .../transaction/TransactionFactoryImpl.java | 37 ++-- .../transaction/TransactionObjectSource.java | 4 + .../dhfs/objects/transaction/TxRecord.java | 27 ++- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 113 +++++++++--- 9 files changed, 299 insertions(+), 100 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 440f606e..0fa3b3a7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -2,10 +2,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifest; -import com.usatiuk.dhfs.objects.transaction.TransactionFactory; -import com.usatiuk.dhfs.objects.transaction.TransactionObjectSource; -import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; -import com.usatiuk.dhfs.objects.transaction.TxRecord; +import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.DataLocker; import com.usatiuk.dhfs.utils.VoidFn; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; @@ -14,7 +11,6 @@ import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.lang3.tuple.Pair; import java.lang.ref.Cleaner; import java.lang.ref.WeakReference; @@ -46,7 +42,7 @@ public class JObjectManager { private static final Cleaner CLEANER = Cleaner.create(); final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - long lastWriteTx = 0; + long lastWriteTx = -1; public JDataWrapper(T referent) { super(referent); @@ -55,9 +51,21 @@ public class JObjectManager { _objects.remove(key, this); }); } + + @Override + public String toString() { + return "JDataWrapper{" + + "ref=" + get() + + ", lock=" + lock + + ", lastWriteTx=" + lastWriteTx + + '}'; + } } - private Pair> get(Class type, JObjectKey key) { + private record WrapperRet(T obj, JDataWrapper wrapper) { + } + + private WrapperRet get(Class type, JObjectKey key) { while (true) { { var got = _objects.get(key); @@ -65,7 +73,7 @@ public class JObjectManager { if (got != null) { var ref = got.get(); if (type.isInstance(ref)) { - return Pair.of(type.cast(ref), (JDataWrapper) got); + return new WrapperRet<>((T) ref, (JDataWrapper) got); } else if (ref == null) { _objects.remove(key, got); } else { @@ -82,10 +90,10 @@ public class JObjectManager { var got = 
objectSerializer.deserialize(read); if (type.isInstance(got)) { - var wrapper = new JDataWrapper((T) got); + var wrapper = new JDataWrapper<>((T) got); var old = _objects.putIfAbsent(key, wrapper); if (old != null) continue; - return Pair.of(type.cast(got), wrapper); + return new WrapperRet<>((T) got, wrapper); } else if (got == null) { return null; } else { @@ -95,60 +103,96 @@ public class JObjectManager { } } + + private WrapperRet getLocked(Class type, JObjectKey key, boolean write) { + var read = get(type, key); + if (read == null) return null; + var lock = write ? read.wrapper().lock.writeLock() : read.wrapper().lock.readLock(); + lock.lock(); + while (true) { + try { + var readAgain = get(type, key); + if (readAgain == null) { + lock.unlock(); + return null; + } + if (!Objects.equals(read, readAgain)) { + lock.unlock(); + read = readAgain; + lock = write ? read.wrapper().lock.writeLock() : read.wrapper().lock.readLock(); + lock.lock(); + continue; + } + return read; + } catch (Throwable e) { + lock.unlock(); + throw e; + } + } + } + private record TransactionObjectImpl (T data, ReadWriteLock lock) - implements TransactionObjectSource.TransactionObject {} + implements TransactionObjectSource.TransactionObject { + } + + private class TransactionObjectSourceImpl implements TransactionObjectSource { + private final long _txId; + + private TransactionObjectSourceImpl(long txId) { + _txId = txId; + } - private final TransactionObjectSource _objSource = new TransactionObjectSource() { @Override public Optional> get(Class type, JObjectKey key) { var got = JObjectManager.this.get(type, key); if (got == null) return Optional.empty(); - return Optional.of(new TransactionObjectImpl<>(got.getLeft(), got.getRight().lock)); - } - - private Optional> getLocked(Class type, JObjectKey key, boolean write) { - var read = get(type, key).orElse(null); - if (read == null) return Optional.empty(); - var lock = write ? read.lock().writeLock() : read.lock().readLock(); - lock.lock(); - while (true) { - try { - var readAgain = get(type, key).orElse(null); - if (readAgain == null) { - lock.unlock(); - return Optional.empty(); - } - if (!Objects.equals(read, readAgain)) { - lock.unlock(); - read = readAgain; - lock = write ? 
read.lock().writeLock() : read.lock().readLock(); - lock.lock(); - continue; - } - return Optional.of(new TransactionObjectImpl<>(read.data(), read.lock())); - } catch (Throwable e) { - lock.unlock(); - throw e; - } - } + return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); } @Override public Optional> getReadLocked(Class type, JObjectKey key) { - return getLocked(type, key, false); + var got = JObjectManager.this.getLocked(type, key, false); + if (got == null) return Optional.empty(); + return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); } @Override public Optional> getWriteLocked(Class type, JObjectKey key) { - return getLocked(type, key, true); + var got = JObjectManager.this.getLocked(type, key, true); + if (got == null) return Optional.empty(); + return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); } - }; + + @Override + public Optional> getReadLockedSerializable(Class type, JObjectKey key) { + var got = JObjectManager.this.getLocked(type, key, false); + if (got == null) return Optional.empty(); + if (got.wrapper().lastWriteTx >= _txId) { + got.wrapper().lock.readLock().unlock(); + throw new IllegalStateException("Serialization race"); + } + return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); + } + + @Override + public Optional> getWriteLockedSerializable(Class type, JObjectKey key) { + var got = JObjectManager.this.getLocked(type, key, true); + if (got == null) return Optional.empty(); + if (got.wrapper().lastWriteTx >= _txId) { + got.wrapper().lock.writeLock().unlock(); + throw new IllegalStateException("Serialization race"); + } + return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); + } + } + + ; public TransactionPrivate createTransaction() { var counter = _txCounter.getAndIncrement(); Log.trace("Creating transaction " + counter); - return transactionFactory.createTransaction(counter, _objSource); + return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); } public void commit(TransactionPrivate tx) { @@ -162,15 +206,21 @@ public class JObjectManager { for (var entry : tx.drain()) { Log.trace("Processing entry " + entry.toString()); switch (entry) { - case TxRecord.TxObjectRecordRead read -> { - toUnlock.add(read.original().lock().readLock()::unlock); - } + case TxRecord.TxObjectRecordRead read -> toUnlock.add(read.original().lock().readLock()::unlock); + case TxRecord.TxObjectRecordReadSerializable read -> + toUnlock.add(read.original().lock().readLock()::unlock); case TxRecord.TxObjectRecordCopyLock copy -> { toUnlock.add(copy.original().lock().writeLock()::unlock); if (copy.copy().isModified()) { toFlush.add(copy); } } + case TxRecord.TxObjectRecordCopyLockSerializable copy -> { // FIXME + toUnlock.add(copy.original().lock().writeLock()::unlock); + if (copy.copy().isModified()) { + toFlush.add(copy); + } + } case TxRecord.TxObjectRecordCopyNoLock copy -> { if (copy.copy().isModified()) { toLock.add(copy); @@ -189,14 +239,17 @@ public class JObjectManager { for (var record : toLock) { Log.trace("Locking " + record.toString()); - var found = _objects.get(record.original().getKey()); + var got = getLocked(record.original().getClass(), record.original().getKey(), true); - if (found.get() != record.original()) { - throw new IllegalStateException("Object changed during transaction"); + if (got == null) { + throw new IllegalStateException("Object not found"); } - found.lock.writeLock().lock(); - 
toUnlock.add(found.lock.writeLock()::unlock); + toUnlock.add(got.wrapper().lock.writeLock()::unlock); + + if (got.obj() != record.original()) { + throw new IllegalStateException("Object changed during transaction"); + } } for (var record : toFlush) { @@ -209,15 +262,23 @@ public class JObjectManager { } else if (current != null) { var old = switch (record) { case TxRecord.TxObjectRecordCopyLock copy -> copy.original().data(); + case TxRecord.TxObjectRecordCopyLockSerializable copy -> copy.original().data(); case TxRecord.TxObjectRecordCopyNoLock copy -> copy.original(); default -> throw new IllegalStateException("Unexpected value: " + record); }; if (current.get() != old) { + assert record instanceof TxRecord.TxObjectRecordCopyNoLock; throw new IllegalStateException("Object changed during transaction"); } - if (current.lastWriteTx > tx.getId()) { + // In case of NoLock write, the instance is replaced and the following shouldn't happen + // It can happen without serializable writes, as then the read of object to transaction + // can happen after another transaction had read, changed and committed it. + if (record instanceof TxRecord.TxObjectRecordCopyLockSerializable + && current.lastWriteTx > tx.getId()) { + assert false; + // Shouldn't happen as we should check for serialization in the tx object source throw new IllegalStateException("Transaction race"); } @@ -241,7 +302,7 @@ public class JObjectManager { // Have all locks now for (var record : toFlush) { - Log.trace("Flushing " + record.toString()); + Log.trace("Flushing " + record.toString() + " " + _objects.get(record.copy().wrapped().getKey()).toString()); assert record.copy().isModified(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index cbbf59ae..39a3b9a2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -1,6 +1,9 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.utils.VoidFn; + +import java.util.function.Supplier; public interface TransactionManager { void begin(); @@ -9,5 +12,29 @@ public interface TransactionManager { void rollback(); + default T run(Supplier supplier) { + begin(); + try { + var ret = supplier.get(); + commit(); + return ret; + } catch (Throwable e) { + rollback(); + throw e; + } + } + + default void run(VoidFn fn) { + begin(); + try { + fn.apply(); + commit(); + } catch (Throwable e) { + rollback(); + throw e; + } + } + + Transaction current(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index f4944aa0..83e8f2ec 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; +import com.usatiuk.dhfs.objects.transaction.TxRecord; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -43,6 +44,17 @@ public class TransactionManagerImpl implements TransactionManager { @Override 
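// A usage sketch of the run() helpers added to TransactionManager above,
// mirroring the test code later in this series; begin/commit/rollback are
// handled by the wrapper, so callers supply only the transaction body:
//
//   txm.run(() -> {
//       var parent = alloc.create(Parent.class, new JObjectKey("Parent"));
//       parent.setLastName("John");
//       curTx.putObject(parent);
//   });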
public void rollback() { + var tx = _currentTransaction.get(); + for (var o : tx.drain()) { + switch (o) { + case TxRecord.TxObjectRecordRead r -> r.original().lock().readLock().unlock(); + case TxRecord.TxObjectRecordReadSerializable r -> r.original().lock().readLock().unlock(); + case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); + case TxRecord.TxObjectRecordCopyLockSerializable r -> r.original().lock().writeLock().unlock(); + default -> { + } + } + } _currentTransaction.remove(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java index f3fb25e4..52cc750b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java @@ -1,7 +1,9 @@ package com.usatiuk.dhfs.objects.transaction; public enum LockingStrategy { - READ_ONLY, // Read only, no writes allowed, blocks writers - OPTIMISTIC, // Optimistic write, no blocking other possible writers - WRITE // Write lock, blocks all other writers + READ, // Read only, no writes allowed, blocks writers + READ_SERIALIZABLE, // Exception if object was written to after transaction start + OPTIMISTIC, // Optimistic write, no blocking other possible writers + WRITE, // Write lock, blocks all other writers + WRITE_SERIALIZABLE // Exception if object was written to after transaction start } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 72794c7b..f92368fe 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -12,6 +12,6 @@ public interface Transaction { void putObject(JData obj); default Optional getObject(Class type, JObjectKey key) { - return getObject(type, key, LockingStrategy.READ_ONLY); + return getObject(type, key, LockingStrategy.READ); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index a84741a4..f4e364b9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -41,34 +41,47 @@ public class TransactionFactoryImpl implements TransactionFactory { return Optional.of(type.cast(compatible)); } - var read = _source.get(type, key).orElse(null); - - if (read == null) { - return Optional.empty(); - } - switch (strategy) { - case READ_ONLY: { - var locked = _source.getReadLocked(type, key).orElse(null); + case READ: + case READ_SERIALIZABLE: { + var locked = strategy == LockingStrategy.READ_SERIALIZABLE + ? _source.getReadLockedSerializable(type, key).orElse(null) + : _source.getReadLocked(type, key).orElse(null); if (locked == null) { return Optional.empty(); } var view = objectAllocator.unmodifiable(locked.data()); - _objects.put(key, new TxRecord.TxObjectRecordRead<>(locked, view)); + _objects.put(key, + strategy == LockingStrategy.READ_SERIALIZABLE + ? 
new TxRecord.TxObjectRecordReadSerializable<>(locked, view) + : new TxRecord.TxObjectRecordRead<>(locked, view) + ); return Optional.of(view); } case OPTIMISTIC: { + var read = _source.get(type, key).orElse(null); + + if (read == null) { + return Optional.empty(); + } var copy = objectAllocator.copy(read.data()); _objects.put(key, new TxRecord.TxObjectRecordCopyNoLock<>(read.data(), copy)); return Optional.of(copy.wrapped()); } - case WRITE: { - var locked = _source.getWriteLocked(type, key).orElse(null); + case WRITE: + case WRITE_SERIALIZABLE: { + var locked = strategy == LockingStrategy.WRITE_SERIALIZABLE + ? _source.getWriteLockedSerializable(type, key).orElse(null) + : _source.getWriteLocked(type, key).orElse(null); if (locked == null) { return Optional.empty(); } var copy = objectAllocator.copy(locked.data()); - _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); + _objects.put(key, + strategy == LockingStrategy.WRITE_SERIALIZABLE + ? new TxRecord.TxObjectRecordCopyLockSerializable<>(locked, copy) + : new TxRecord.TxObjectRecordCopyLock<>(locked, copy) + ); return Optional.of(copy.wrapped()); } default: diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index b6cd156e..815f7aff 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -18,4 +18,8 @@ public interface TransactionObjectSource { Optional> getReadLocked(Class type, JObjectKey key); Optional> getWriteLocked(Class type, JObjectKey key); + + Optional> getReadLockedSerializable(Class type, JObjectKey key); + + Optional> getWriteLockedSerializable(Class type, JObjectKey key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index abf2e39f..039bd88e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -18,7 +18,18 @@ public class TxRecord { implements TxObjectRecord { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.READ_ONLY) + if (strategy == LockingStrategy.READ) + return copy; + return null; + } + } + + public record TxObjectRecordReadSerializable(TransactionObjectSource.TransactionObject original, + T copy) + implements TxObjectRecord { + @Override + public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { + if (strategy == LockingStrategy.READ_SERIALIZABLE) return copy; return null; } @@ -50,7 +61,7 @@ public class TxRecord { } public record TxObjectRecordCopyLock(TransactionObjectSource.TransactionObject original, - ObjectAllocator.ChangeTrackingJData copy) + ObjectAllocator.ChangeTrackingJData copy) implements TxObjectRecordWrite { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { @@ -60,6 +71,18 @@ public class TxRecord { } } + public record TxObjectRecordCopyLockSerializable( + TransactionObjectSource.TransactionObject original, + ObjectAllocator.ChangeTrackingJData copy) + implements TxObjectRecordWrite { + @Override + public T getIfStrategyCompatible(JObjectKey key, 
LockingStrategy strategy) { + if (strategy == LockingStrategy.WRITE_SERIALIZABLE) + return copy.wrapped(); + return null; + } + } + public record TxObjectRecordCopyNoLock(T original, ObjectAllocator.ChangeTrackingJData copy) implements TxObjectRecordWrite { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 69278976..a175cd0e 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -9,10 +9,11 @@ import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Semaphore; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; @QuarkusTest @@ -38,7 +39,7 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent"), LockingStrategy.READ_ONLY).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent"), LockingStrategy.READ).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } @@ -72,7 +73,7 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.READ_ONLY).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.READ).orElse(null); Assertions.assertEquals("John3", parent.getLastName()); txm.commit(); } @@ -83,21 +84,20 @@ public class ObjectsTest { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); - var signal = new Semaphore(0); + var barrier = new CyclicBarrier(2); var latch = new CountDownLatch(2); Just.run(() -> { try { Log.warn("Thread 1"); txm.begin(); + barrier.await(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); newParent.setLastName("John"); curTx.putObject(newParent); - signal.acquire(); Log.warn("Thread 1 commit"); txm.commit(); thread1Failed.set(false); - signal.release(); return null; } finally { latch.countDown(); @@ -107,25 +107,23 @@ public class ObjectsTest { try { Log.warn("Thread 2"); txm.begin(); + barrier.await(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); newParent.setLastName("John2"); curTx.putObject(newParent); - signal.acquire(); Log.warn("Thread 2 commit"); txm.commit(); thread2Failed.set(false); - signal.release(); return null; } finally { latch.countDown(); } }); - signal.release(2); latch.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ_ONLY).orElse(null); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ).orElse(null); txm.commit(); if (!thread1Failed.get()) { @@ -151,47 +149,44 @@ public class ObjectsTest { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); - var signal = new Semaphore(0); - var latch = new CountDownLatch(2); + var barrier = new CyclicBarrier(2); + var latchEnd = new CountDownLatch(2); Just.run(() -> { try { Log.warn("Thread 1"); txm.begin(); + barrier.await(); var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), 
LockingStrategy.OPTIMISTIC).orElse(null); parent.setLastName("John"); - signal.acquire(); Log.warn("Thread 1 commit"); txm.commit(); thread1Failed.set(false); - signal.release(); return null; } finally { - latch.countDown(); + latchEnd.countDown(); } }); Just.run(() -> { try { Log.warn("Thread 2"); txm.begin(); + barrier.await(); var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.OPTIMISTIC).orElse(null); parent.setLastName("John2"); - signal.acquire(); Log.warn("Thread 2 commit"); txm.commit(); thread2Failed.set(false); - signal.release(); return null; } finally { - latch.countDown(); + latchEnd.countDown(); } }); - signal.release(2); - latch.await(); + latchEnd.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.READ_ONLY).orElse(null); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.READ).orElse(null); txm.commit(); if (!thread1Failed.get()) { @@ -219,20 +214,19 @@ public class ObjectsTest { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); - var signal = new Semaphore(0); + var barrier = new CyclicBarrier(2); var latch = new CountDownLatch(2); Just.run(() -> { try { Log.warn("Thread 1"); txm.begin(); + barrier.await(); var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); parent.setLastName("John"); - signal.acquire(); Log.warn("Thread 1 commit"); txm.commit(); thread1Failed.set(false); - signal.release(); return null; } finally { latch.countDown(); @@ -242,30 +236,93 @@ public class ObjectsTest { try { Log.warn("Thread 2"); txm.begin(); + barrier.await(); var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); parent.setLastName("John2"); - signal.acquire(); Log.warn("Thread 2 commit"); txm.commit(); thread2Failed.set(false); - signal.release(); return null; } finally { latch.countDown(); } }); - signal.release(2); latch.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.READ_ONLY).orElse(null); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.READ).orElse(null); txm.commit(); Assertions.assertTrue(!thread1Failed.get() && !thread2Failed.get()); Assertions.assertTrue(got.getLastName().equals("John") || got.getLastName().equals("John2")); } + @Test + @Disabled // Doesn't work as "lastWrittenTx" is not persistent + void editLockSerializable() throws InterruptedException { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent6")); + newParent.setLastName("John3"); + curTx.putObject(newParent); + txm.commit(); + } + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var barrier = new CyclicBarrier(2); + var latchEnd = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.begin(); + barrier.await(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.WRITE_SERIALIZABLE).orElse(null); + parent.setLastName("John"); + Log.warn("Thread 1 commit"); + txm.commit(); + thread1Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.begin(); + barrier.await(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.WRITE_SERIALIZABLE).orElse(null); + 
parent.setLastName("John2"); + Log.warn("Thread 2 commit"); + txm.commit(); + thread2Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + + latchEnd.await(); + + txm.begin(); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.READ).orElse(null); + txm.commit(); + + if (!thread1Failed.get()) { + Assertions.assertTrue(thread2Failed.get()); + Assertions.assertEquals("John", got.getLastName()); + } else if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.getLastName()); + } else { + Assertions.fail("No thread succeeded"); + } + + Assertions.assertTrue(thread1Failed.get() || thread2Failed.get()); + } // } // // @Test From 80e73fe7af5927b0c331de99f2493b65b0b565fd Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 4 Dec 2024 22:39:31 +0100 Subject: [PATCH 011/105] simplify transaction isolation --- .../deployment/ObjectsAllocProcessor.java | 7 +- .../alloc/runtime/ChangeTrackingJData.java | 9 + .../alloc/runtime/ObjectAllocator.java | 7 - .../usatiuk/dhfs/objects/JObjectManager.java | 158 +++++++---------- .../dhfs/objects/TransactionManagerImpl.java | 3 - .../objects/transaction/LockingStrategy.java | 5 +- .../dhfs/objects/transaction/Transaction.java | 2 +- .../transaction/TransactionFactoryImpl.java | 32 +--- .../transaction/TransactionObject.java | 11 ++ .../transaction/TransactionObjectSource.java | 13 -- .../dhfs/objects/transaction/TxRecord.java | 66 +------ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 167 ++++-------------- 12 files changed, 133 insertions(+), 347 deletions(-) create mode 100644 dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java index f839e363..91bedd0d 100644 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java @@ -1,5 +1,6 @@ package com.usatiuk.objects.alloc.deployment; +import com.usatiuk.objects.alloc.runtime.ChangeTrackingJData; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; @@ -120,7 +121,7 @@ class ObjectsAllocProcessor { for (var item : jDataItems) { try (ClassCreator classCreator = ClassCreator.builder() .className(getCTClassName(item.klass).toString()) - .interfaces(JData.class, ObjectAllocator.ChangeTrackingJData.class) + .interfaces(JData.class, ChangeTrackingJData.class) .interfaces(item.klass.name().toString()) .interfaces(Serializable.class) .classOutput(gizmoAdapter) @@ -192,7 +193,7 @@ class ObjectsAllocProcessor { for (var item : jDataItems) { try (ClassCreator classCreator = ClassCreator.builder() .className(getImmutableClassName(item.klass).toString()) - .interfaces(JData.class, ObjectAllocator.ChangeTrackingJData.class) + .interfaces(JData.class, ChangeTrackingJData.class) .interfaces(item.klass.name().toString()) .interfaces(Serializable.class) .classOutput(gizmoAdapter) @@ -357,7 +358,7 @@ class ObjectsAllocProcessor { 
methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); } - try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ObjectAllocator.ChangeTrackingJData.class, JData.class)) { + try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ChangeTrackingJData.class, JData.class)) { matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getCTClassName(type).toString(), type.name().toString()), value)); }); diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java new file mode 100644 index 00000000..846ab9fa --- /dev/null +++ b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java @@ -0,0 +1,9 @@ +package com.usatiuk.objects.alloc.runtime; + +import com.usatiuk.objects.common.runtime.JData; + +public interface ChangeTrackingJData { + T wrapped(); + + boolean isModified(); +} diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java index b4a3361e..763f1051 100644 --- a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java +++ b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java @@ -6,13 +6,6 @@ import com.usatiuk.objects.common.runtime.JObjectKey; public interface ObjectAllocator { T create(Class type, JObjectKey key); - interface ChangeTrackingJData { - T wrapped(); - - boolean isModified(); - } - - // A copy of data that can be modified without affecting the original, and that can track changes ChangeTrackingJData copy(T obj); T unmodifiable(T obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 0fa3b3a7..43a172fe 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects; +import com.google.common.collect.Streams; import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.objects.transaction.*; @@ -42,7 +43,7 @@ public class JObjectManager { private static final Cleaner CLEANER = Cleaner.create(); final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - long lastWriteTx = -1; + long lastWriteTx = -1; // FIXME: This should be persistent public JDataWrapper(T referent) { super(referent); @@ -77,7 +78,7 @@ public class JObjectManager { } else if (ref == null) { _objects.remove(key, got); } else { - throw new IllegalArgumentException("Object type mismatch"); + throw new IllegalArgumentException("Object type mismatch: " + ref.getClass() + " vs " + type); } } } @@ -85,7 +86,7 @@ public class JObjectManager { //noinspection unused try (var readLock = _storageReadLocker.lock(key)) { var read = objectStorage.readObject(key).orElse(null); - if (read == null) throw new IllegalArgumentException("Object not found"); + if (read == null) throw new 
IllegalArgumentException("Object not found: " + key); var got = objectSerializer.deserialize(read); @@ -97,7 +98,7 @@ public class JObjectManager { } else if (got == null) { return null; } else { - throw new IllegalArgumentException("Object type mismatch"); + throw new IllegalArgumentException("Object type mismatch: " + got.getClass() + " vs " + type); } } } @@ -133,7 +134,7 @@ public class JObjectManager { private record TransactionObjectImpl (T data, ReadWriteLock lock) - implements TransactionObjectSource.TransactionObject { + implements TransactionObject { } private class TransactionObjectSourceImpl implements TransactionObjectSource { @@ -150,33 +151,8 @@ public class JObjectManager { return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); } - @Override - public Optional> getReadLocked(Class type, JObjectKey key) { - var got = JObjectManager.this.getLocked(type, key, false); - if (got == null) return Optional.empty(); - return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); - } - @Override public Optional> getWriteLocked(Class type, JObjectKey key) { - var got = JObjectManager.this.getLocked(type, key, true); - if (got == null) return Optional.empty(); - return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); - } - - @Override - public Optional> getReadLockedSerializable(Class type, JObjectKey key) { - var got = JObjectManager.this.getLocked(type, key, false); - if (got == null) return Optional.empty(); - if (got.wrapper().lastWriteTx >= _txId) { - got.wrapper().lock.readLock().unlock(); - throw new IllegalStateException("Serialization race"); - } - return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); - } - - @Override - public Optional> getWriteLockedSerializable(Class type, JObjectKey key) { var got = JObjectManager.this.getLocked(type, key, true); if (got == null) return Optional.empty(); if (got.wrapper().lastWriteTx >= _txId) { @@ -187,8 +163,6 @@ public class JObjectManager { } } - ; - public TransactionPrivate createTransaction() { var counter = _txCounter.getAndIncrement(); Log.trace("Creating transaction " + counter); @@ -198,7 +172,9 @@ public class JObjectManager { public void commit(TransactionPrivate tx) { var toUnlock = new LinkedList(); var toFlush = new LinkedList>(); - var toLock = new ArrayList>(); + var toPut = new LinkedList>(); + var toLock = new ArrayList>(); + var dependencies = new LinkedList>(); Log.trace("Committing transaction " + tx.getId()); @@ -206,29 +182,22 @@ public class JObjectManager { for (var entry : tx.drain()) { Log.trace("Processing entry " + entry.toString()); switch (entry) { - case TxRecord.TxObjectRecordRead read -> toUnlock.add(read.original().lock().readLock()::unlock); - case TxRecord.TxObjectRecordReadSerializable read -> - toUnlock.add(read.original().lock().readLock()::unlock); case TxRecord.TxObjectRecordCopyLock copy -> { toUnlock.add(copy.original().lock().writeLock()::unlock); + dependencies.add(copy.original()); if (copy.copy().isModified()) { toFlush.add(copy); } } - case TxRecord.TxObjectRecordCopyLockSerializable copy -> { // FIXME - toUnlock.add(copy.original().lock().writeLock()::unlock); + case TxRecord.TxObjectRecordOptimistic copy -> { + toLock.add(copy); + dependencies.add(copy.original()); if (copy.copy().isModified()) { toFlush.add(copy); } } - case TxRecord.TxObjectRecordCopyNoLock copy -> { - if (copy.copy().isModified()) { - toLock.add(copy); - toFlush.add(copy); - } - } case TxRecord.TxObjectRecordNew created -> { - 
toFlush.add(created); + toPut.add(created); } default -> throw new IllegalStateException("Unexpected value: " + entry); } @@ -239,86 +208,77 @@ public class JObjectManager { for (var record : toLock) { Log.trace("Locking " + record.toString()); - var got = getLocked(record.original().getClass(), record.original().getKey(), true); + var got = getLocked(record.original().data().getClass(), record.original().data().getKey(), true); if (got == null) { - throw new IllegalStateException("Object not found"); + throw new IllegalStateException("Object " + record.original().data().getKey() + " not found"); } toUnlock.add(got.wrapper().lock.writeLock()::unlock); - if (got.obj() != record.original()) { - throw new IllegalStateException("Object changed during transaction"); + if (got.obj() != record.original().data()) { + throw new IllegalStateException("Object changed during transaction: " + got.obj() + " vs " + record.original().data()); } } - for (var record : toFlush) { - Log.trace("Processing flush entry " + record.toString()); + for (var dep : dependencies) { + Log.trace("Checking dependency " + dep.toString()); + var current = _objects.get(dep.data().getKey()); - var current = _objects.get(record.copy().wrapped().getKey()); + if (current.get() != dep.data()) { + throw new IllegalStateException("Object changed during transaction: " + current.get() + " vs " + dep.data()); + } - if (current == null && !(record instanceof TxRecord.TxObjectRecordNew)) { - throw new IllegalStateException("Object not found during transaction"); - } else if (current != null) { - var old = switch (record) { - case TxRecord.TxObjectRecordCopyLock copy -> copy.original().data(); - case TxRecord.TxObjectRecordCopyLockSerializable copy -> copy.original().data(); - case TxRecord.TxObjectRecordCopyNoLock copy -> copy.original(); - default -> throw new IllegalStateException("Unexpected value: " + record); - }; - - if (current.get() != old) { - assert record instanceof TxRecord.TxObjectRecordCopyNoLock; - throw new IllegalStateException("Object changed during transaction"); - } - - // In case of NoLock write, the instance is replaced and the following shouldn't happen - // It can happen without serializable writes, as then the read of object to transaction - // can happen after another transaction had read, changed and committed it. 
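// The commit path this patch settles on is a standard optimistic concurrency
// scheme: lock the touched objects in a deterministic order, verify nothing
// changed since they were read, then flush. A condensed, self-contained
// sketch of that idea follows; the Entry type and commit() helper are
// illustrative stand-ins, not this repository's API.

import java.util.*;
import java.util.concurrent.locks.ReentrantLock;

class OptimisticCommitSketch {
    record Entry(long version, ReentrantLock lock) {}

    static void commit(long txId, List<Entry> touched, Runnable flush) {
        // Deterministic (identity-hash) lock order avoids deadlocks between
        // two transactions committing overlapping write sets.
        var ordered = new ArrayList<>(touched);
        ordered.sort(Comparator.comparingInt(System::identityHashCode));
        var held = new ArrayDeque<ReentrantLock>();
        try {
            for (var e : ordered) {
                e.lock().lock();
                held.push(e.lock());
                // A version at or above our tx id means a concurrent commit won.
                if (e.version() >= txId)
                    throw new IllegalStateException("Serialization hazard: " + e.version() + " vs " + txId);
            }
            flush.run(); // write the objects and the tx manifest under the locks
        } finally {
            while (!held.isEmpty()) held.pop().unlock();
        }
    }
}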
- if (record instanceof TxRecord.TxObjectRecordCopyLockSerializable - && current.lastWriteTx > tx.getId()) { - assert false; - // Shouldn't happen as we should check for serialization in the tx object source - throw new IllegalStateException("Transaction race"); - } - - var newWrapper = new JDataWrapper<>(record.copy().wrapped()); - newWrapper.lock.writeLock().lock(); - if (!_objects.replace(record.copy().wrapped().getKey(), current, newWrapper)) { - throw new IllegalStateException("Object changed during transaction"); - } - toUnlock.add(newWrapper.lock.writeLock()::unlock); - } else if (record instanceof TxRecord.TxObjectRecordNew created) { - var wrapper = new JDataWrapper<>(created.created()); - wrapper.lock.writeLock().lock(); - var old = _objects.putIfAbsent(created.created().getKey(), wrapper); - if (old != null) - throw new IllegalStateException("Object already exists"); - toUnlock.add(wrapper.lock.writeLock()::unlock); - } else { - throw new IllegalStateException("Object not found during transaction"); + if (current.lastWriteTx >= tx.getId()) { + throw new IllegalStateException("Serialization hazard: " + current.lastWriteTx + " vs " + tx.getId()); } } - // Have all locks now - for (var record : toFlush) { - Log.trace("Flushing " + record.toString() + " " + _objects.get(record.copy().wrapped().getKey()).toString()); + for (var put : toPut) { + Log.trace("Putting new object " + put.toString()); + var wrapper = new JDataWrapper<>(put.created()); + wrapper.lock.writeLock().lock(); + var old = _objects.putIfAbsent(put.created().getKey(), wrapper); + if (old != null) + throw new IllegalStateException("Object already exists: " + old.get()); + toUnlock.add(wrapper.lock.writeLock()::unlock); + } + for (var record : toFlush) { + Log.trace("Flushing changed " + record.toString()); + var current = _objects.get(record.original().data().getKey()); assert record.copy().isModified(); - var obj = record.copy().wrapped(); + var newWrapper = new JDataWrapper<>(record.copy().wrapped()); + newWrapper.lock.writeLock().lock(); + if (!_objects.replace(record.copy().wrapped().getKey(), current, newWrapper)) { + assert false; + throw new IllegalStateException("Object changed during transaction after locking: " + current.get() + " vs " + record.copy().wrapped()); + } + toUnlock.add(newWrapper.lock.writeLock()::unlock); + } + + Log.tracef("Flushing transaction %d to storage", tx.getId()); + + var written = Streams.concat(toFlush.stream().map(f -> f.copy().wrapped()), + toPut.stream().map(TxRecord.TxObjectRecordNew::created)).toList(); + + // Really flushing to storage + written.forEach(obj -> { + Log.trace("Flushing object " + obj.getKey()); var key = obj.getKey(); var data = objectSerializer.serialize(obj); objectStorage.writeObject(key, data); _objects.get(key).lastWriteTx = tx.getId(); // FIXME: - } + }); - Log.trace("Flushing transaction " + tx.getId()); + Log.tracef("Committing transaction %d to storage", tx.getId()); objectStorage.commitTx(new TxManifest() { @Override public List getWritten() { - return toFlush.stream().map(r -> r.copy().wrapped().getKey()).toList(); + // FIXME: + return written.stream().map(JData::getKey).toList(); } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index 83e8f2ec..f872e932 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -47,10 +47,7 @@ public class TransactionManagerImpl implements TransactionManager { var tx = _currentTransaction.get(); for (var o : tx.drain()) { switch (o) { - case TxRecord.TxObjectRecordRead r -> r.original().lock().readLock().unlock(); - case TxRecord.TxObjectRecordReadSerializable r -> r.original().lock().readLock().unlock(); case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); - case TxRecord.TxObjectRecordCopyLockSerializable r -> r.original().lock().writeLock().unlock(); default -> { } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java index 52cc750b..1cf28822 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/LockingStrategy.java @@ -1,9 +1,6 @@ package com.usatiuk.dhfs.objects.transaction; public enum LockingStrategy { - READ, // Read only, no writes allowed, blocks writers - READ_SERIALIZABLE, // Exception if object was written to after transaction start - OPTIMISTIC, // Optimistic write, no blocking other possible writers + OPTIMISTIC, // Optimistic write, no blocking other possible writers/readers WRITE, // Write lock, blocks all other writers - WRITE_SERIALIZABLE // Exception if object was written to after transaction start } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index f92368fe..544c3a5c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -12,6 +12,6 @@ public interface Transaction { void putObject(JData obj); default Optional getObject(Class type, JObjectKey key) { - return getObject(type, key, LockingStrategy.READ); + return getObject(type, key, LockingStrategy.OPTIMISTIC); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index f4e364b9..b10ff167 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -42,46 +42,22 @@ public class TransactionFactoryImpl implements TransactionFactory { } switch (strategy) { - case READ: - case READ_SERIALIZABLE: { - var locked = strategy == LockingStrategy.READ_SERIALIZABLE - ? _source.getReadLockedSerializable(type, key).orElse(null) - : _source.getReadLocked(type, key).orElse(null); - if (locked == null) { - return Optional.empty(); - } - var view = objectAllocator.unmodifiable(locked.data()); - _objects.put(key, - strategy == LockingStrategy.READ_SERIALIZABLE - ? 
new TxRecord.TxObjectRecordReadSerializable<>(locked, view) - : new TxRecord.TxObjectRecordRead<>(locked, view) - ); - return Optional.of(view); - } case OPTIMISTIC: { var read = _source.get(type, key).orElse(null); - if (read == null) { return Optional.empty(); } var copy = objectAllocator.copy(read.data()); - _objects.put(key, new TxRecord.TxObjectRecordCopyNoLock<>(read.data(), copy)); + _objects.put(key, new TxRecord.TxObjectRecordOptimistic<>(read, copy)); return Optional.of(copy.wrapped()); } - case WRITE: - case WRITE_SERIALIZABLE: { - var locked = strategy == LockingStrategy.WRITE_SERIALIZABLE - ? _source.getWriteLockedSerializable(type, key).orElse(null) - : _source.getWriteLocked(type, key).orElse(null); + case WRITE: { + var locked = _source.getWriteLocked(type, key).orElse(null); if (locked == null) { return Optional.empty(); } var copy = objectAllocator.copy(locked.data()); - _objects.put(key, - strategy == LockingStrategy.WRITE_SERIALIZABLE - ? new TxRecord.TxObjectRecordCopyLockSerializable<>(locked, copy) - : new TxRecord.TxObjectRecordCopyLock<>(locked, copy) - ); + _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); return Optional.of(copy.wrapped()); } default: diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java new file mode 100644 index 00000000..cd5dc7e6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.objects.common.runtime.JData; + +import java.util.concurrent.locks.ReadWriteLock; + +public interface TransactionObject { + T data(); + + ReadWriteLock lock(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 815f7aff..14835797 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -4,22 +4,9 @@ import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import java.util.Optional; -import java.util.concurrent.locks.ReadWriteLock; public interface TransactionObjectSource { - interface TransactionObject { - T data(); - - ReadWriteLock lock(); - } - Optional> get(Class type, JObjectKey key); - Optional> getReadLocked(Class type, JObjectKey key); - Optional> getWriteLocked(Class type, JObjectKey key); - - Optional> getReadLockedSerializable(Class type, JObjectKey key); - - Optional> getWriteLockedSerializable(Class type, JObjectKey key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 039bd88e..dc0b590d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.objects.alloc.runtime.ChangeTrackingJData; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; -import 
com.usatiuk.objects.alloc.runtime.ObjectAllocator; public class TxRecord { public interface TxObjectRecord { @@ -10,58 +10,22 @@ public class TxRecord { } public interface TxObjectRecordWrite extends TxObjectRecord { - ObjectAllocator.ChangeTrackingJData copy(); + TransactionObject original(); + + ChangeTrackingJData copy(); } - public record TxObjectRecordRead(TransactionObjectSource.TransactionObject original, - T copy) - implements TxObjectRecord { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.READ) - return copy; - return null; - } - } - - public record TxObjectRecordReadSerializable(TransactionObjectSource.TransactionObject original, - T copy) - implements TxObjectRecord { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.READ_SERIALIZABLE) - return copy; - return null; - } - } - - public record TxObjectRecordNew(T created) - implements TxObjectRecordWrite { + public record TxObjectRecordNew(T created) implements TxObjectRecord { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC) return created; return null; } - - @Override - public ObjectAllocator.ChangeTrackingJData copy() { - return new ObjectAllocator.ChangeTrackingJData() { - @Override - public T wrapped() { - return created; - } - - @Override - public boolean isModified() { - return true; - } - }; - } } - public record TxObjectRecordCopyLock(TransactionObjectSource.TransactionObject original, - ObjectAllocator.ChangeTrackingJData copy) + public record TxObjectRecordCopyLock(TransactionObject original, + ChangeTrackingJData copy) implements TxObjectRecordWrite { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { @@ -71,20 +35,8 @@ public class TxRecord { } } - public record TxObjectRecordCopyLockSerializable( - TransactionObjectSource.TransactionObject original, - ObjectAllocator.ChangeTrackingJData copy) - implements TxObjectRecordWrite { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.WRITE_SERIALIZABLE) - return copy.wrapped(); - return null; - } - } - - public record TxObjectRecordCopyNoLock(T original, - ObjectAllocator.ChangeTrackingJData copy) + public record TxObjectRecordOptimistic(TransactionObject original, + ChangeTrackingJData copy) implements TxObjectRecordWrite { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index a175cd0e..89e2d8d7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -9,8 +9,9 @@ import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -39,7 +40,29 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new 
JObjectKey("Parent"), LockingStrategy.READ).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + txm.commit(); + } + } + + @Test + void createCreateObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + newParent.setLastName("John"); + curTx.putObject(newParent); + txm.commit(); + } + Assertions.assertThrows(Exception.class, () -> txm.run(() -> { + var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + newParent.setLastName("John2"); + curTx.putObject(newParent); + })); + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } @@ -73,7 +96,7 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.READ).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3")).orElse(null); Assertions.assertEquals("John3", parent.getLastName()); txm.commit(); } @@ -123,7 +146,7 @@ public class ObjectsTest { latch.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent2"), LockingStrategy.READ).orElse(null); + var got = curTx.getObject(Parent.class, new JObjectKey("Parent2")).orElse(null); txm.commit(); if (!thread1Failed.get()) { @@ -136,11 +159,13 @@ public class ObjectsTest { } } - @Test - void editConflict() throws InterruptedException { + @ParameterizedTest + @EnumSource(LockingStrategy.class) + void editConflict(LockingStrategy strategy) throws InterruptedException { + String key = "Parent4" + strategy.name(); { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent4")); + var newParent = alloc.create(Parent.class, new JObjectKey(key)); newParent.setLastName("John3"); curTx.putObject(newParent); txm.commit(); @@ -157,7 +182,7 @@ public class ObjectsTest { Log.warn("Thread 1"); txm.begin(); barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.OPTIMISTIC).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey(key), strategy).orElse(null); parent.setLastName("John"); Log.warn("Thread 1 commit"); txm.commit(); @@ -172,7 +197,7 @@ public class ObjectsTest { Log.warn("Thread 2"); txm.begin(); barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.OPTIMISTIC).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey(key), strategy).orElse(null); parent.setLastName("John2"); Log.warn("Thread 2 commit"); txm.commit(); @@ -186,7 +211,7 @@ public class ObjectsTest { latchEnd.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent4"), LockingStrategy.READ).orElse(null); + var got = curTx.getObject(Parent.class, new JObjectKey(key)).orElse(null); txm.commit(); if (!thread1Failed.get()) { @@ -201,128 +226,6 @@ public class ObjectsTest { Assertions.assertTrue(thread1Failed.get() || thread2Failed.get()); } - @Test - void editLock() throws InterruptedException { - { - txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent5")); - newParent.setLastName("John3"); - curTx.putObject(newParent); - txm.commit(); - } - - AtomicBoolean thread1Failed = new AtomicBoolean(true); - AtomicBoolean thread2Failed = new AtomicBoolean(true); - - var barrier = new CyclicBarrier(2); - var latch = new 
CountDownLatch(2); - - Just.run(() -> { - try { - Log.warn("Thread 1"); - txm.begin(); - barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); - parent.setLastName("John"); - Log.warn("Thread 1 commit"); - txm.commit(); - thread1Failed.set(false); - return null; - } finally { - latch.countDown(); - } - }); - Just.run(() -> { - try { - Log.warn("Thread 2"); - txm.begin(); - barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.WRITE).orElse(null); - parent.setLastName("John2"); - Log.warn("Thread 2 commit"); - txm.commit(); - thread2Failed.set(false); - return null; - } finally { - latch.countDown(); - } - }); - - latch.await(); - - txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent5"), LockingStrategy.READ).orElse(null); - txm.commit(); - - Assertions.assertTrue(!thread1Failed.get() && !thread2Failed.get()); - Assertions.assertTrue(got.getLastName().equals("John") || got.getLastName().equals("John2")); - } - - @Test - @Disabled // Doesn't work as "lastWrittenTx" is not persistent - void editLockSerializable() throws InterruptedException { - { - txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent6")); - newParent.setLastName("John3"); - curTx.putObject(newParent); - txm.commit(); - } - - AtomicBoolean thread1Failed = new AtomicBoolean(true); - AtomicBoolean thread2Failed = new AtomicBoolean(true); - - var barrier = new CyclicBarrier(2); - var latchEnd = new CountDownLatch(2); - - Just.run(() -> { - try { - Log.warn("Thread 1"); - txm.begin(); - barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.WRITE_SERIALIZABLE).orElse(null); - parent.setLastName("John"); - Log.warn("Thread 1 commit"); - txm.commit(); - thread1Failed.set(false); - return null; - } finally { - latchEnd.countDown(); - } - }); - Just.run(() -> { - try { - Log.warn("Thread 2"); - txm.begin(); - barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.WRITE_SERIALIZABLE).orElse(null); - parent.setLastName("John2"); - Log.warn("Thread 2 commit"); - txm.commit(); - thread2Failed.set(false); - return null; - } finally { - latchEnd.countDown(); - } - }); - - latchEnd.await(); - - txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent6"), LockingStrategy.READ).orElse(null); - txm.commit(); - - if (!thread1Failed.get()) { - Assertions.assertTrue(thread2Failed.get()); - Assertions.assertEquals("John", got.getLastName()); - } else if (!thread2Failed.get()) { - Assertions.assertEquals("John2", got.getLastName()); - } else { - Assertions.fail("No thread succeeded"); - } - - Assertions.assertTrue(thread1Failed.get() || thread2Failed.get()); - } // } // // @Test From 6da883fef91ccde0fd8740afca1abc9c5dbac772 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 4 Dec 2024 23:15:05 +0100 Subject: [PATCH 012/105] object alloc version --- .../deployment/ObjectsAllocProcessor.java | 111 +++++++++--------- .../alloc/it/DummyVersionProvider.java | 20 ++++ .../objects/alloc/it/ObjectAllocIT.java | 34 ++++++ .../usatiuk/objects/common/runtime/JData.java | 5 +- .../runtime/JDataAllocVersionProvider.java | 5 + 5 files changed, 117 insertions(+), 58 deletions(-) create mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java create mode 100644 
dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java index 91bedd0d..5a339234 100644 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java +++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java @@ -3,6 +3,7 @@ package com.usatiuk.objects.alloc.deployment; import com.usatiuk.objects.alloc.runtime.ChangeTrackingJData; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.arc.deployment.GeneratedBeanBuildItem; import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor; @@ -12,6 +13,7 @@ import io.quarkus.deployment.annotations.BuildStep; import io.quarkus.deployment.builditem.ApplicationIndexBuildItem; import io.quarkus.deployment.builditem.GeneratedClassBuildItem; import io.quarkus.gizmo.*; +import jakarta.inject.Inject; import jakarta.inject.Singleton; import org.apache.commons.lang3.tuple.Pair; import org.jboss.jandex.ClassInfo; @@ -20,6 +22,7 @@ import org.jboss.jandex.MethodInfo; import java.io.Serializable; import java.util.*; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -41,6 +44,8 @@ class ObjectsAllocProcessor { } private static final String KEY_NAME = "key"; + private static final String VERSION_NAME = "version"; + private static final List SPECIAL_FIELDS = List.of(KEY_NAME, VERSION_NAME); String propNameToFieldName(String name) { return name; @@ -79,35 +84,24 @@ class ObjectsAllocProcessor { .build()) { - var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { - var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); - - if (jDataFieldInfo.name().equals(KEY_NAME)) { - fc.setModifiers(PRIVATE | FINAL); - } else { - fc.setModifiers(PRIVATE); - } - - try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { - getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); - } - return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); - }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + var fieldsMap = createFields(item, classCreator); for (var field : fieldsMap.values()) { - if (field.getName().equals(KEY_NAME)) { - try (var constructor = classCreator.getConstructorCreator(JObjectKey.class)) { - constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); - constructor.writeInstanceField(field, constructor.getThis(), constructor.getMethodParam(0)); - constructor.returnVoid(); - } - } else { + if (!SPECIAL_FIELDS.contains(field.getName())) { try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); setter.returnVoid(); } } } + + try (var constructor = classCreator.getConstructorCreator(JObjectKey.class, long.class)) { + 
constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); + constructor.writeInstanceField(fieldsMap.get(KEY_NAME), constructor.getThis(), constructor.getMethodParam(0)); + constructor.writeInstanceField(fieldsMap.get(VERSION_NAME), constructor.getThis(), constructor.getMethodParam(1)); + constructor.returnVoid(); + } + } } } @@ -146,23 +140,10 @@ class ObjectsAllocProcessor { wrapped.returnValue(wrapped.getThis()); } - var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { - var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); - - if (jDataFieldInfo.name().equals(KEY_NAME)) { - fc.setModifiers(PRIVATE | FINAL); - } else { - fc.setModifiers(PRIVATE); - } - - try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { - getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); - } - return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); - }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + var fieldsMap = createFields(item, classCreator); for (var field : fieldsMap.values()) { - if (!field.getName().equals(KEY_NAME)) { + if (!SPECIAL_FIELDS.contains(field.getName())) { try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); setter.invokeVirtualMethod(MethodDescriptor.ofMethod(classCreator.getClassName(), ON_CHANGE_METHOD_NAME, void.class), setter.getThis()); @@ -171,15 +152,17 @@ class ObjectsAllocProcessor { } } - try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString())) { + try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString(), long.class.getName())) { constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); constructor.writeInstanceField(modified.getFieldDescriptor(), constructor.getThis(), constructor.load(false)); for (var field : fieldsMap.values()) { - constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod( - MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()), - constructor.getMethodParam(0) - )); + if (!Objects.equals(field.getName(), VERSION_NAME)) + constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod( + MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()), + constructor.getMethodParam(0) + )); } + constructor.writeInstanceField(fieldsMap.get(VERSION_NAME), constructor.getThis(), constructor.getMethodParam(1)); constructor.returnVoid(); } } @@ -199,20 +182,7 @@ class ObjectsAllocProcessor { .classOutput(gizmoAdapter) .build()) { - var fieldsMap = item.fields.values().stream().map(jDataFieldInfo -> { - var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); - - if (jDataFieldInfo.name().equals(KEY_NAME)) { - fc.setModifiers(PRIVATE | FINAL); - } else { - fc.setModifiers(PRIVATE); - } - - try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { - getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); - } - return 
Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); - }).collect(Collectors.toUnmodifiableMap(Pair::getLeft, Pair::getRight)); + var fieldsMap = createFields(item, classCreator); for (var field : fieldsMap.values()) { try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { @@ -235,6 +205,23 @@ class ObjectsAllocProcessor { } + private Map createFields(JDataInfoBuildItem item, ClassCreator classCreator) { + return item.fields.values().stream().map(jDataFieldInfo -> { + var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); + + if (SPECIAL_FIELDS.contains(jDataFieldInfo.name())) { + fc.setModifiers(PRIVATE | FINAL); + } else { + fc.setModifiers(PRIVATE); + } + + try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { + getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); + } + return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); + }).collect(Collectors.toUnmodifiableMap(i -> i.getLeft().name(), Pair::getRight)); + } + List collectInterfaces(ClassInfo type, ApplicationIndexBuildItem jandex) { return Stream.concat(Stream.of(type), type.interfaceNames().stream() .flatMap(x -> { @@ -275,6 +262,7 @@ class ObjectsAllocProcessor { System.out.println("Missing key getter for " + item.jData); // FIXME!: No matter what, I couldn't get JData to get indexed by jandex fields.put(KEY_NAME, new JDataFieldInfo(KEY_NAME, DotName.createSimple(JObjectKey.class))); + fields.put(VERSION_NAME, new JDataFieldInfo(VERSION_NAME, DotName.createSimple(long.class))); } // Find pairs of getters and setters @@ -351,16 +339,25 @@ class ObjectsAllocProcessor { classCreator.addAnnotation(Singleton.class); + var versionProvider = classCreator.getFieldCreator("versionProvider", JDataAllocVersionProvider.class); + versionProvider.addAnnotation(Inject.class); + versionProvider.setModifiers(PUBLIC); + + Function loadVersion = (block) -> block.invokeInterfaceMethod( + MethodDescriptor.ofMethod(JDataAllocVersionProvider.class, "getVersion", long.class), + block.readInstanceField(versionProvider.getFieldDescriptor(), block.getThis()) + ); + try (MethodCreator methodCreator = classCreator.getMethodCreator("create", JData.class, Class.class, JObjectKey.class)) { matchClassTag(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { - branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getDataClassName(type).toString(), JObjectKey.class), branch.getMethodParam(1))); + branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getDataClassName(type).toString(), JObjectKey.class, long.class), branch.getMethodParam(1), loadVersion.apply(branch))); }); methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); } try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ChangeTrackingJData.class, JData.class)) { matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { - branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getCTClassName(type).toString(), type.name().toString()), value)); + branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getCTClassName(type).toString(), type.name().toString(), long.class.getName()), value, loadVersion.apply(branch))); }); methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); } diff --git 
a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java new file mode 100644 index 00000000..5db49c0c --- /dev/null +++ b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java @@ -0,0 +1,20 @@ +package com.usatiuk.objects.alloc.it; + +import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class DummyVersionProvider implements JDataAllocVersionProvider { + + long version = 0; + + @Override + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + +} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java index 51ea070e..7c70671f 100644 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java +++ b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java @@ -12,6 +12,40 @@ public class ObjectAllocIT { @Inject ObjectAllocator objectAllocator; + @Inject + DummyVersionProvider dummyVersionProvider; + + @Test + void testCreateVersion() { + dummyVersionProvider.setVersion(1); + var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); + Assertions.assertEquals(1, newObject.getVersion()); + } + + @Test + void testCopyVersion() { + dummyVersionProvider.setVersion(1); + var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); + newObject.setLastName("1"); + Assertions.assertNotNull(newObject); + Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); + Assertions.assertEquals(1, newObject.getVersion()); + + dummyVersionProvider.setVersion(2); + var copyObject = objectAllocator.copy(newObject); + Assertions.assertNotNull(copyObject); + Assertions.assertFalse(copyObject.isModified()); + Assertions.assertEquals("1", copyObject.wrapped().getLastName()); + Assertions.assertEquals(2, copyObject.wrapped().getVersion()); + Assertions.assertEquals(1, newObject.getVersion()); + copyObject.wrapped().setLastName("2"); + Assertions.assertTrue(copyObject.isModified()); + Assertions.assertEquals("2", copyObject.wrapped().getLastName()); + Assertions.assertEquals("1", newObject.getLastName()); + } + @Test void testCreateObject() { var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java index 5d97c2dd..f1ca4baf 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java @@ -2,8 +2,11 @@ package com.usatiuk.objects.common.runtime; // TODO: This could be maybe moved to a separate module? 
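// A minimal illustration of the versioning semantics this hunk introduces,
// assuming an allocator that stamps every created or copied instance with
// the current transaction id it obtains from a provider; VersionSource and
// VersionedDatum are invented names, not this codebase's API.

interface VersionSource { long current(); }

record VersionedDatum(String key, long version, String lastName) {
    static VersionedDatum create(VersionSource tx, String key) {
        return new VersionedDatum(key, tx.current(), null);
    }

    // A copy made in a later transaction carries that transaction's id,
    // while the committed original keeps the id it was written under.
    VersionedDatum copyFor(VersionSource tx) {
        return new VersionedDatum(key, tx.current(), lastName);
    }
}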
// The base class for JObject data -// Only one instance of this exists per key, the instance in the manager is canonical +// Only one instance of this "exists" per key, the instance in the manager is canonical // When committing a transaction, the instance is checked against it, if it isn't the same, a race occurred. +// It is immutable, its version is filled in by the allocator from the AllocVersionProvider public interface JData { JObjectKey getKey(); + + long getVersion(); } diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java new file mode 100644 index 00000000..27c2b110 --- /dev/null +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java @@ -0,0 +1,5 @@ +package com.usatiuk.objects.common.runtime; + +public interface JDataAllocVersionProvider { + long getVersion(); +} From aa69ae13a4129fa7ed3938b65751b25391253c16 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 4 Dec 2024 23:25:54 +0100 Subject: [PATCH 013/105] object alloc version persistent (but not yet loaded to transaction) --- .../dhfs/objects/CurrentTransaction.java | 5 +++++ .../usatiuk/dhfs/objects/JObjectManager.java | 21 +++++++++---------- .../dhfs/objects/transaction/Transaction.java | 2 ++ ...TransactionObjectAllocVersionProvider.java | 16 ++++++++++++++ .../transaction/TransactionPrivate.java | 1 - .../com/usatiuk/dhfs/objects/ObjectsTest.java | 6 +++--- 6 files changed, 36 insertions(+), 15 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index bd8dab04..b986b4e4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -14,6 +14,11 @@ public class CurrentTransaction implements Transaction { @Inject TransactionManager transactionManager; + @Override + public long getId() { + return transactionManager.current().getId(); + } + @Override public Optional getObject(Class type, JObjectKey key, LockingStrategy strategy) { return transactionManager.current().getObject(type, key, strategy); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 43a172fe..687a15fa 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -43,7 +43,6 @@ public class JObjectManager { private static final Cleaner CLEANER = Cleaner.create(); final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - long lastWriteTx = -1; // FIXME: This should be persistent public JDataWrapper(T referent) { super(referent); @@ -58,7 +57,6 @@ public class JObjectManager { return "JDataWrapper{" + "ref=" + get() + ", lock=" + lock + - ", lastWriteTx=" + lastWriteTx + '}'; } } @@ -155,7 +153,7 @@ public class JObjectManager { public Optional> getWriteLocked(Class type, JObjectKey key) { var got = JObjectManager.this.getLocked(type, key, true); if (got == 
null) return Optional.empty(); - if (got.wrapper().lastWriteTx >= _txId) { + if (got.obj.getVersion() >= _txId) { got.wrapper().lock.writeLock().unlock(); throw new IllegalStateException("Serialization race"); } @@ -170,7 +168,9 @@ public class JObjectManager { } public void commit(TransactionPrivate tx) { + // This also holds the weak references var toUnlock = new LinkedList(); + var toFlush = new LinkedList>(); var toPut = new LinkedList>(); var toLock = new ArrayList>(); @@ -223,14 +223,13 @@ public class JObjectManager { for (var dep : dependencies) { Log.trace("Checking dependency " + dep.toString()); - var current = _objects.get(dep.data().getKey()); + var current = _objects.get(dep.data().getKey()).get(); - if (current.get() != dep.data()) { - throw new IllegalStateException("Object changed during transaction: " + current.get() + " vs " + dep.data()); - } + // Checked above + assert current == dep.data(); - if (current.lastWriteTx >= tx.getId()) { - throw new IllegalStateException("Serialization hazard: " + current.lastWriteTx + " vs " + tx.getId()); + if (current.getVersion() >= tx.getId()) { + throw new IllegalStateException("Serialization hazard: " + current.getVersion() + " vs " + tx.getId()); } } @@ -252,7 +251,7 @@ public class JObjectManager { var newWrapper = new JDataWrapper<>(record.copy().wrapped()); newWrapper.lock.writeLock().lock(); if (!_objects.replace(record.copy().wrapped().getKey(), current, newWrapper)) { - assert false; + assert false; // Should not happen, as the object is locked throw new IllegalStateException("Object changed during transaction after locking: " + current.get() + " vs " + record.copy().wrapped()); } toUnlock.add(newWrapper.lock.writeLock()::unlock); @@ -266,10 +265,10 @@ public class JObjectManager { // Really flushing to storage written.forEach(obj -> { Log.trace("Flushing object " + obj.getKey()); + assert obj.getVersion() == tx.getId(); var key = obj.getKey(); var data = objectSerializer.serialize(obj); objectStorage.writeObject(key, data); - _objects.get(key).lastWriteTx = tx.getId(); // FIXME: }); Log.tracef("Committing transaction %d to storage", tx.getId()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 544c3a5c..6e7f2228 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -7,6 +7,8 @@ import java.util.Optional; // The transaction interface actually used by user code to retrieve objects public interface Transaction { + long getId(); + Optional getObject(Class type, JObjectKey key, LockingStrategy strategy); void putObject(JData obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java new file mode 100644 index 00000000..4bb6118e --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java @@ -0,0 +1,16 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; + +@Singleton +public class TransactionObjectAllocVersionProvider implements JDataAllocVersionProvider 
{ + @Inject + Transaction transaction; + + public long getVersion() { + return transaction.getId(); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 2d3300bc..d2f12014 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -4,6 +4,5 @@ import java.util.Collection; // The transaction interface actually used by user code to retrieve objects public interface TransactionPrivate extends Transaction{ - long getId(); Collection> drain(); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 89e2d8d7..ccacd341 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -50,19 +50,19 @@ public class ObjectsTest { void createCreateObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); newParent.setLastName("John"); curTx.putObject(newParent); txm.commit(); } Assertions.assertThrows(Exception.class, () -> txm.run(() -> { - var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); newParent.setLastName("John2"); curTx.putObject(newParent); })); { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent7")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } From b92877025f1921bd92b283431505388561548f63 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 7 Dec 2024 16:58:23 +0100 Subject: [PATCH 014/105] slightly nicer tx dependency tracking --- .../usatiuk/dhfs/objects/JObjectManager.java | 53 ++++++++---- .../dhfs/objects/TransactionManagerImpl.java | 2 +- .../transaction/ReadTrackingObjectSource.java | 83 +++++++++++++++++++ .../transaction/TransactionFactoryImpl.java | 11 ++- .../transaction/TransactionPrivate.java | 9 +- .../dhfs/objects/transaction/TxRecord.java | 7 ++ 6 files changed, 141 insertions(+), 24 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 687a15fa..e7466e59 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -24,6 +24,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; // Manages all access to com.usatiuk.objects.common.runtime.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. 
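// The "nicer dependency tracking" in this patch hinges on recording every
// read a transaction makes, so commit can re-validate exactly what was
// observed. A minimal sketch of the idea, assuming a plain key -> value
// lookup; ReadTrackingSketch is an invented name, not the repository's
// ReadTrackingObjectSource API.

import java.util.*;
import java.util.function.Function;

class ReadTrackingSketch<K, V> {
    private final Function<K, Optional<V>> delegate;
    private final Map<K, Optional<V>> readSet = new HashMap<>();

    ReadTrackingSketch(Function<K, Optional<V>> delegate) {
        this.delegate = delegate;
    }

    // Memoizes hits and misses alike, so repeated reads see one snapshot
    // and the commit step can later re-lock and re-check the whole read set.
    Optional<V> get(K key) {
        return readSet.computeIfAbsent(key, delegate);
    }

    Map<K, Optional<V>> reads() {
        return Collections.unmodifiableMap(readSet);
    }
}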
// All data goes through it, it is responsible for transaction atomicity +// TODO: persistent tx id @ApplicationScoped public class JObjectManager { @Inject @@ -173,28 +174,26 @@ public class JObjectManager { var toFlush = new LinkedList>(); var toPut = new LinkedList>(); - var toLock = new ArrayList>(); + var toLock = new ArrayList>(); var dependencies = new LinkedList>(); Log.trace("Committing transaction " + tx.getId()); + // For existing objects: + // Check that their version is not higher than the version of transaction being committed + // TODO: check deletions, inserts + try { - for (var entry : tx.drain()) { - Log.trace("Processing entry " + entry.toString()); + for (var entry : tx.writes()) { + Log.trace("Processing write " + entry.toString()); switch (entry) { case TxRecord.TxObjectRecordCopyLock copy -> { toUnlock.add(copy.original().lock().writeLock()::unlock); - dependencies.add(copy.original()); - if (copy.copy().isModified()) { - toFlush.add(copy); - } + toFlush.add(copy); } case TxRecord.TxObjectRecordOptimistic copy -> { - toLock.add(copy); - dependencies.add(copy.original()); - if (copy.copy().isModified()) { - toFlush.add(copy); - } + toLock.add(copy.original()); + toFlush.add(copy); } case TxRecord.TxObjectRecordNew created -> { toPut.add(created); @@ -203,21 +202,35 @@ public class JObjectManager { } } - toLock.sort(Comparator.comparingInt(a -> System.identityHashCode(a.original()))); + for (var entry : tx.reads().entrySet()) { + Log.trace("Processing read " + entry.toString()); + switch (entry.getValue()) { + case ReadTrackingObjectSource.TxReadObjectNone none -> { + // TODO: Check this + } + case ReadTrackingObjectSource.TxReadObjectSome(var obj) -> { + toLock.add(obj); + dependencies.add(obj); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); + } + } + + toLock.sort(Comparator.comparingInt(System::identityHashCode)); for (var record : toLock) { Log.trace("Locking " + record.toString()); - var got = getLocked(record.original().data().getClass(), record.original().data().getKey(), true); + var got = getLocked(record.data().getClass(), record.data().getKey(), true); if (got == null) { - throw new IllegalStateException("Object " + record.original().data().getKey() + " not found"); + throw new IllegalStateException("Object " + record.data().getKey() + " not found"); } toUnlock.add(got.wrapper().lock.writeLock()::unlock); - if (got.obj() != record.original().data()) { - throw new IllegalStateException("Object changed during transaction: " + got.obj() + " vs " + record.original().data()); + if (got.obj() != record.data()) { + throw new IllegalStateException("Object changed during transaction: " + got.obj() + " vs " + record.data()); } } @@ -244,9 +257,13 @@ public class JObjectManager { } for (var record : toFlush) { + if (!record.copy().isModified()) { + Log.trace("Not changed " + record.toString()); + continue; + } + Log.trace("Flushing changed " + record.toString()); var current = _objects.get(record.original().data().getKey()); - assert record.copy().isModified(); var newWrapper = new JDataWrapper<>(record.copy().wrapped()); newWrapper.lock.writeLock().lock(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index f872e932..f08159a4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -45,7 +45,7 @@ public class TransactionManagerImpl implements TransactionManager { @Override public void rollback() { var tx = _currentTransaction.get(); - for (var o : tx.drain()) { + for (var o : tx.writes()) { switch (o) { case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); default -> { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java new file mode 100644 index 00000000..a8337b95 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java @@ -0,0 +1,83 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +public class ReadTrackingObjectSource implements TransactionObjectSource { + private final TransactionObjectSource _delegate; + + public interface TxReadObject {} + + public record TxReadObjectNone() implements TxReadObject {} + + public record TxReadObjectSome(TransactionObject obj) implements TxReadObject {} + + private final Map> _readSet = new HashMap<>(); + + public ReadTrackingObjectSource(TransactionObjectSource delegate) { + _delegate = delegate; + } + + public Map> getRead() { + return Collections.unmodifiableMap(_readSet); + } + + @Override + public Optional> get(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var read = _delegate.get(type, key); + if (read.isPresent()) { + _readSet.put(key, new TxReadObjectSome<>(read.get())); + } else { + _readSet.put(key, new TxReadObjectNone<>()); + } + return read; + } + + return switch (got) { + case TxReadObjectNone none -> Optional.empty(); + case TxReadObjectSome some -> { + if (type.isInstance(some.obj().data())) { + yield Optional.of((TransactionObject) some.obj()); + } else { + yield Optional.empty(); + } + } + default -> throw new IllegalStateException("Unexpected value: " + got); + }; + } + + @Override + public Optional> getWriteLocked(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var read = _delegate.getWriteLocked(type, key); + if (read.isPresent()) { + _readSet.put(key, new TxReadObjectSome<>(read.get())); + } else { + _readSet.put(key, new TxReadObjectNone<>()); + } + return read; + } + + return switch (got) { + case TxReadObjectNone none -> Optional.empty(); + case TxReadObjectSome some -> { + if (type.isInstance(some.obj().data())) { + yield Optional.of((TransactionObject) some.obj()); + } else { + yield Optional.empty(); + } + } + default -> throw new IllegalStateException("Unexpected value: " + got); + }; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index b10ff167..3b35db50 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -18,13 +18,13 @@ public class TransactionFactoryImpl implements TransactionFactory { private class TransactionImpl implements 
TransactionPrivate { @Getter(AccessLevel.PUBLIC) private final long _id; - private final TransactionObjectSource _source; + private final ReadTrackingObjectSource _source; private final Map> _objects = new HashMap<>(); private TransactionImpl(long id, TransactionObjectSource source) { _id = id; - _source = source; + _source = new ReadTrackingObjectSource(source); } @Override @@ -75,9 +75,14 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public Collection> drain() { + public Collection> writes() { return Collections.unmodifiableCollection(_objects.values()); } + + @Override + public Map> reads() { + return _source.getRead(); + } } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index d2f12014..18a5f488 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -1,8 +1,13 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.objects.common.runtime.JObjectKey; + import java.util.Collection; +import java.util.Map; // The transaction interface actually used by user code to retrieve objects -public interface TransactionPrivate extends Transaction{ - Collection> drain(); +public interface TransactionPrivate extends Transaction { + Collection> writes(); + + Map> reads(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index dc0b590d..818ab340 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -9,6 +9,13 @@ public class TxRecord { T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy); } + public record TxObjectRecordMissing(JObjectKey key) implements TxObjectRecord { + @Override + public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { + return null; + } + } + public interface TxObjectRecordWrite extends TxObjectRecord { TransactionObject original(); From e213e7a8f6660fd38acd313503493299c1dabe29 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 7 Dec 2024 17:24:23 +0100 Subject: [PATCH 015/105] crapfs import --- dhfs-parent/crapfs/pom.xml | 208 ++++++++ dhfs-parent/crapfs/src/lombok.config | 1 + .../crapfs/src/main/docker/Dockerfile.jvm | 97 ++++ .../src/main/docker/Dockerfile.legacy-jar | 93 ++++ .../crapfs/src/main/docker/Dockerfile.native | 27 ++ .../src/main/docker/Dockerfile.native-micro | 30 ++ .../crapfs/src/main/java/org/acme/Main.java | 21 + .../org/acme/files/objects/ChunkData.java | 9 + .../java/org/acme/files/objects/File.java | 16 + .../acme/files/service/DhfsFileService.java | 443 ++++++++++++++++++ .../src/main/java/org/acme/fuse/DhfsFuse.java | 260 ++++++++++ .../src/main/resources/application.properties | 22 + .../crapfs/src/main/resources/import.sql | 6 + dhfs-parent/pom.xml | 1 + 14 files changed, 1234 insertions(+) create mode 100644 dhfs-parent/crapfs/pom.xml create mode 100644 dhfs-parent/crapfs/src/lombok.config create mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm create mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar create mode 100644 
dhfs-parent/crapfs/src/main/docker/Dockerfile.native create mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro create mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/Main.java create mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java create mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java create mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java create mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java create mode 100644 dhfs-parent/crapfs/src/main/resources/application.properties create mode 100644 dhfs-parent/crapfs/src/main/resources/import.sql diff --git a/dhfs-parent/crapfs/pom.xml b/dhfs-parent/crapfs/pom.xml new file mode 100644 index 00000000..d24db205 --- /dev/null +++ b/dhfs-parent/crapfs/pom.xml @@ -0,0 +1,208 @@ + + + 4.0.0 + crapfs + 1.0.0-SNAPSHOT + + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + + + org.testcontainers + testcontainers + test + + + org.awaitility + awaitility + test + + + com.usatiuk + autoprotomap + 1.0-SNAPSHOT + + + com.usatiuk + autoprotomap-deployment + 1.0-SNAPSHOT + provided + + + org.bouncycastle + bcprov-jdk18on + 1.78.1 + + + org.bouncycastle + bcpkix-jdk18on + 1.78.1 + + + io.quarkus + quarkus-security + + + net.openhft + zero-allocation-hashing + + + io.quarkus + quarkus-grpc + + + io.quarkus + quarkus-arc + + + io.quarkus + quarkus-rest + + + io.quarkus + quarkus-rest-client + + + io.quarkus + quarkus-rest-client-jsonb + + + io.quarkus + quarkus-rest-jsonb + + + io.quarkus + quarkus-scheduler + + + io.quarkus + quarkus-junit5 + test + + + org.projectlombok + lombok + provided + + + com.github.SerCeMan + jnr-fuse + 44ed40f8ce + + + com.github.jnr + jnr-ffi + 2.2.16 + + + com.github.jnr + jnr-posix + 3.1.19 + + + com.github.jnr + jnr-constants + 0.10.4 + + + org.apache.commons + commons-lang3 + + + commons-io + commons-io + + + org.jboss.slf4j + slf4j-jboss-logmanager + test + + + commons-codec + commons-codec + + + org.apache.commons + commons-collections4 + + + org.apache.commons + commons-math3 + 3.6.1 + + + com.usatiuk + kleppmanntree + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + supportlib + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + objects + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1C + false + classes + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + true + + + concurrent + + + 0.5 + + true + true + + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + true + + + quarkus-plugin + + build + generate-code + generate-code-tests + + + + + + + diff --git a/dhfs-parent/crapfs/src/lombok.config b/dhfs-parent/crapfs/src/lombok.config new file mode 100644 index 00000000..f1c474ce --- /dev/null +++ b/dhfs-parent/crapfs/src/lombok.config @@ -0,0 +1 @@ +lombok.accessors.prefix += _ diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm b/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm new file mode 100644 index 00000000..2c7432c7 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm @@ -0,0 +1,97 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/crapfs-jvm . 
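+#
+# If remote debugging is needed (see the JAVA_DEBUG options described below; port 5005 is the default debug port), the container can also be run as:
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_DEBUG=true -e JAVA_DEBUG_PORT=*:5005 quarkus/crapfs-jvm
+#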
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. +# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. 
+# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. (example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-21:1.20 + +ENV LANGUAGE='en_US:en' + + +# We make four distinct layers so if there are application changes the library layers can be re-used +COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ +COPY --chown=185 target/quarkus-app/*.jar /deployments/ +COPY --chown=185 target/quarkus-app/app/ /deployments/app/ +COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] + diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar new file mode 100644 index 00000000..fcc2aef7 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar @@ -0,0 +1,93 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package -Dquarkus.package.jar.type=legacy-jar +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/crapfs-legacy-jar . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. +# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. 
If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. 
(example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-21:1.20 + +ENV LANGUAGE='en_US:en' + + +COPY target/lib/* /deployments/lib/ +COPY target/*-runner.jar /deployments/quarkus-run.jar + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native new file mode 100644 index 00000000..4e549443 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native @@ -0,0 +1,27 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native -t quarkus/crapfs . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs +# +### +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro new file mode 100644 index 00000000..d3b97ce7 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro @@ -0,0 +1,30 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# It uses a micro base image, tuned for Quarkus native executables. +# It reduces the size of the resulting container image. +# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/crapfs . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/crapfs +# +### +FROM quay.io/quarkus/quarkus-micro-image:2.0 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/Main.java b/dhfs-parent/crapfs/src/main/java/org/acme/Main.java new file mode 100644 index 00000000..9afa3a9a --- /dev/null +++ b/dhfs-parent/crapfs/src/main/java/org/acme/Main.java @@ -0,0 +1,21 @@ +package org.acme; + +import io.quarkus.runtime.Quarkus; +import io.quarkus.runtime.QuarkusApplication; +import io.quarkus.runtime.annotations.QuarkusMain; + +@QuarkusMain +public class Main { + public static void main(String... args) { + Quarkus.run(CrapfsServerApp.class, args); + } + + public static class CrapfsServerApp implements QuarkusApplication { + + @Override + public int run(String... 
args) throws Exception { + Quarkus.waitForExit(); + return 0; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java new file mode 100644 index 00000000..4803cad5 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java @@ -0,0 +1,9 @@ +package org.acme.files.objects; + +import com.usatiuk.objects.common.runtime.JData; + +public interface ChunkData extends JData { + byte[] getData(); + + void setData(byte[] data); +} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java new file mode 100644 index 00000000..cc3ebeef --- /dev/null +++ b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java @@ -0,0 +1,16 @@ +package org.acme.files.objects; + +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; + +import java.util.NavigableMap; + +public interface File extends JData { + NavigableMap getChunks(); + + void setChunks(NavigableMap chunk); + + long getSize(); + + void setSize(long size); +} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java new file mode 100644 index 00000000..9b7744a1 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java @@ -0,0 +1,443 @@ +package org.acme.files.service; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JObjectKey; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.acme.files.objects.ChunkData; +import org.acme.files.objects.File; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.*; + +@ApplicationScoped +public class DhfsFileService { + + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + + @ConfigProperty(name = "dhfs.files.write_merge_threshold") + float writeMergeThreshold; + + @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take") + float writeMergeMaxChunkToTake; + + @ConfigProperty(name = "dhfs.files.write_merge_limit") + float writeMergeLimit; + + @ConfigProperty(name = "dhfs.files.write_last_chunk_limit") + float writeLastChunkLimit; + + @ConfigProperty(name = "dhfs.objects.write_log") + boolean writeLogging; + + @Inject + Transaction curTx; + + @Inject + TransactionManager txm; + + @Inject + ObjectAllocator alloc; + + long chunkCounter = 0; + + void init(@Observes @Priority(500) StartupEvent event) { + Log.info("Initializing file service"); + } + + public Optional open(String path) { + return txm.run(() -> { + if (curTx.getObject(File.class, new JObjectKey(path)).orElse(null) != null) { + return Optional.of(path); + } + return Optional.empty(); + }); + } + + public Optional create(String path) { + if (path.contains("/")) { + throw new 
StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Path should not contain slashes")); + } + + return txm.run(() -> { + var file = alloc.create(File.class, new JObjectKey(path)); + curTx.putObject(file); + return Optional.of(path); + }); + } + + private JObjectKey createChunk(ByteString bytes) { + var cd = alloc.create(ChunkData.class, new JObjectKey("chunk-" + chunkCounter++)); + cd.setData(bytes.toByteArray()); + curTx.putObject(cd); + return cd.getKey(); + } + + private ByteString readChunk(JObjectKey uuid) { + var chunk = curTx.getObject(ChunkData.class, uuid); + if (chunk.isEmpty()) { + Log.error("Chunk not found when trying to read: " + uuid); + throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Chunk not found")); + } + return UnsafeByteOperations.unsafeWrap(chunk.get().getData()); + } + + private static final List fileNames = List.of("file1", "file2"); + + public List readdir(String path) { + if (!path.equals("")) { + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Only root directory is supported")); + } + + return txm.run(() -> { + var ret = new ArrayList(); + for (String fileName : fileNames) { + var got = curTx.getObject(File.class, new JObjectKey(fileName)); + if (got.isPresent()) { + ret.add(fileName); + } + } + return ret; + }); + } + + public Optional read(String fileUuid, long offset, int length) { + if (length < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length)); + if (offset < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset)); + + return txm.run(() -> { + var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); + if (file == null) { + Log.error("File not found when trying to read: " + fileUuid); + return Optional.empty(); + } + + try { + var chunksAll = new TreeMap<>(file.getChunks()); + if (chunksAll.isEmpty()) { + return Optional.of(ByteString.empty()); + } + var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); + + if (chunksList.isEmpty()) { + return Optional.of(ByteString.empty()); + } + + var chunks = chunksList.iterator(); + ByteString buf = ByteString.empty(); + + long curPos = offset; + var chunk = chunks.next(); + + while (curPos < offset + length) { + var chunkPos = chunk.getKey(); + + long offInChunk = curPos - chunkPos; + + long toReadInChunk = (offset + length) - curPos; + + var chunkBytes = readChunk(chunk.getValue()); + + long readableLen = chunkBytes.size() - offInChunk; + + var toReadReally = Math.min(readableLen, toReadInChunk); + + if (toReadReally < 0) break; + + buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); + + curPos += toReadReally; + + if (readableLen > toReadInChunk) + break; + + if (!chunks.hasNext()) break; + + chunk = chunks.next(); + } + + // FIXME: + return Optional.of(buf); + } catch (Exception e) { + Log.error("Error reading file: " + fileUuid, e); + return Optional.empty(); + } + }); + } + + + public Long write(String fileUuid, long offset, ByteString data) { + if (offset < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should not be negative: " + offset)); + + return txm.run(() -> { + // FIXME: + var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); + if (file == null) { + Log.error("File not found when trying to write: " + fileUuid); + return -1L; + } + + if (size(fileUuid) < offset)
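+ // A write past the current end of the file first extends it: truncate() below zero-fills the gap with zero chunks, so the chunk map never contains holes.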
+ truncate(fileUuid, offset); + + // Get chunk ids from the database + var chunksAll = file.getChunks(); + var first = chunksAll.floorEntry(offset); + var last = chunksAll.lowerEntry(offset + data.size()); + NavigableMap removedChunks = new TreeMap<>(); + + long start = 0; + + NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); + NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); + + if (first != null && readChunk(first.getValue()).size() + first.getKey() <= offset) { + beforeFirst = chunksAll; + afterLast = Collections.emptyNavigableMap(); + first = null; + last = null; + start = offset; + } else if (!chunksAll.isEmpty()) { + var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); + removedChunks.putAll(between); + start = first.getKey(); + } + + ByteString pendingWrites = ByteString.empty(); + + if (first != null && first.getKey() < offset) { + var chunkBytes = readChunk(first.getValue()); + pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); + } + pendingWrites = pendingWrites.concat(data); + + if (last != null) { + var lchunkBytes = readChunk(last.getValue()); + if (last.getKey() + lchunkBytes.size() > offset + data.size()) { + var startInFile = offset + data.size(); + var startInChunk = startInFile - last.getKey(); + pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); + } + } + + int combinedSize = pendingWrites.size(); + + if (targetChunkSize > 0) { + if (combinedSize < (targetChunkSize * writeMergeThreshold)) { + boolean leftDone = false; + boolean rightDone = false; + while (!leftDone && !rightDone) { + if (beforeFirst.isEmpty()) leftDone = true; + if (!beforeFirst.isEmpty() || !leftDone) { + var takeLeft = beforeFirst.lastEntry(); + + var cuuid = takeLeft.getValue(); + + if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) { + leftDone = true; + continue; + } + + if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) { + leftDone = true; + continue; + } + + // FIXME: (and test this) + beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); + start = takeLeft.getKey(); + pendingWrites = readChunk(cuuid).concat(pendingWrites); + combinedSize += readChunk(cuuid).size(); + removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); + } + if (afterLast.isEmpty()) rightDone = true; + if (!afterLast.isEmpty() && !rightDone) { + var takeRight = afterLast.firstEntry(); + + var cuuid = takeRight.getValue(); + + if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) { + rightDone = true; + continue; + } + + if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) { + rightDone = true; + continue; + } + + // FIXME: (and test this) + afterLast = afterLast.tailMap(takeRight.getKey(), false); + pendingWrites = pendingWrites.concat(readChunk(cuuid)); + combinedSize += readChunk(cuuid).size(); + removedChunks.put(takeRight.getKey(), takeRight.getValue()); + } + } + } + } + + NavigableMap newChunks = new TreeMap<>(); + + { + int cur = 0; + while (cur < combinedSize) { + int end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { + end = Math.min(cur + targetChunkSize, combinedSize); + } else { + end = combinedSize; + } + } + + var thisChunk = pendingWrites.substring(cur, 
end); + + newChunks.put(start, createChunk(thisChunk)); + + start += thisChunk.size(); + cur = end; + } + } + + var newChunksMap = new TreeMap<>(chunksAll); + + for (var e : removedChunks.entrySet()) { + newChunksMap.remove(e.getKey()); +// em.remove(em.getReference(ChunkData.class, e.getValue())); + } + + newChunksMap.putAll(newChunks); + + file.setChunks(newChunksMap); + + updateFileSize(file); + + return (long) data.size(); + }); + } + + public Boolean truncate(String fileUuid, long length) { + if (length < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should not be negative: " + length)); + + return txm.run(() -> { + var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); + if (file == null) { + Log.error("File not found when trying to truncate: " + fileUuid); + return false; + } + + if (length == 0) { + file.setChunks(new TreeMap<>()); + updateFileSize(file); + return true; + } + + var curSize = size(fileUuid); + if (curSize == length) return true; + + var chunksAll = file.getChunks(); + NavigableMap removedChunks = new TreeMap<>(); + NavigableMap newChunks = new TreeMap<>(); + + if (curSize < length) { + long combinedSize = (length - curSize); + + long start = curSize; + + // Hack + HashMap zeroCache = new HashMap<>(); + + { + long cur = 0; + while (cur < combinedSize) { + long end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * 1.5)) { + end = cur + targetChunkSize; + } else { + end = combinedSize; + } + } + + if (!zeroCache.containsKey(end - cur)) + zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); + + newChunks.put(start, createChunk(zeroCache.get(end - cur))); + start += (end - cur); + cur = end; + } + } + } else { + var tail = chunksAll.lowerEntry(length); + var afterTail = chunksAll.tailMap(tail.getKey(), false); + + removedChunks.put(tail.getKey(), tail.getValue()); + removedChunks.putAll(afterTail); + + var tailBytes = readChunk(tail.getValue()); + var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); + + newChunks.put(tail.getKey(), createChunk(newChunk)); + } + + var newChunkMap = new TreeMap<>(chunksAll); + + for (var e : removedChunks.entrySet()) { + newChunkMap.remove(e.getKey()); +// em.remove(em.getReference(ChunkData.class, e.getValue())); + } + newChunkMap.putAll(newChunks); + + file.setChunks(newChunkMap); + + updateFileSize(file); + return true; + }); + } + + public void updateFileSize(File file) { + long realSize = 0; + + var last = file.getChunks().lastEntry(); + if (last != null) { + var lastSize = readChunk(last.getValue()).size(); + realSize = last.getKey() + lastSize; + } + + if (realSize != file.getSize()) { + file.setSize(realSize); + } + } + + public Long size(String uuid) { + return txm.run(() -> { + var file = curTx.getObject(File.class, new JObjectKey(uuid)).orElse(null); + if (file == null) { + Log.error("File not found when trying to get size: " + uuid); + return -1L; + } + return file.getSize(); + }); + } +} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java b/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java new file mode 100644 index 00000000..cae0662d --- /dev/null +++ b/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java @@ -0,0 +1,260 @@ +package org.acme.fuse; + +import com.google.protobuf.UnsafeByteOperations; + +import com.sun.security.auth.module.UnixSystem; +import io.grpc.Status; +import io.grpc.StatusRuntimeException;
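+// Orientation note: DhfsFileService above keeps file contents as a NavigableMap from
+// file offset to chunk key (keys are "chunk-" plus a global counter, as issued by
+// createChunk). With the default 262144-byte target chunk size, a freshly written file
+// might map 0 -> "chunk-0", 262144 -> "chunk-1", 524288 -> "chunk-2" (tail); actual key
+// numbers vary since the counter is shared. floorEntry(offset) then locates the chunk
+// covering a given position.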
+import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jnr.ffi.Pointer; +import org.acme.files.service.DhfsFileService; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import ru.serce.jnrfuse.ErrorCodes; +import ru.serce.jnrfuse.FuseFillDir; +import ru.serce.jnrfuse.FuseStubFS; +import ru.serce.jnrfuse.struct.FileStat; +import ru.serce.jnrfuse.struct.FuseFileInfo; +import ru.serce.jnrfuse.struct.Statvfs; + +import java.nio.file.Paths; +import java.util.ArrayList; + +import static jnr.posix.FileStat.S_IFREG; + +@ApplicationScoped +public class DhfsFuse extends FuseStubFS { + private static final int blksize = 1048576; + private static final int iosize = 1048576; + + @ConfigProperty(name = "dhfs.fuse.root") + String root; + + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + + @Inject + DhfsFileService fileService; + + ClassLoader classLoader; + + void init(@Observes @Priority(100000) StartupEvent event) { + classLoader = Thread.currentThread().getContextClassLoader(); + + Paths.get(root).toFile().mkdirs(); + Log.info("Mounting with root " + root); + + var uid = new UnixSystem().getUid(); + var gid = new UnixSystem().getGid(); + + var opts = new ArrayList(); + + // Assuming macFuse +// if (SystemUtils.IS_OS_MAC) { + opts.add("-o"); + opts.add("iosize=" + iosize); +// } else if (SystemUtils.IS_OS_LINUX) { +// // FIXME: There's something else missing: the writes still seem to be 32k max +//// opts.add("-o"); +//// opts.add("large_read"); +// opts.add("-o"); +// opts.add("big_writes"); +// opts.add("-o"); +// opts.add("max_read=" + iosize); +// opts.add("-o"); +// opts.add("max_write=" + iosize); +// } + opts.add("-o"); + opts.add("auto_cache"); + opts.add("-o"); + opts.add("uid=" + uid); + opts.add("-o"); + opts.add("gid=" + gid); + + mount(Paths.get(root), false, false, opts.toArray(String[]::new)); + } + + void shutdown(@Observes @Priority(1) ShutdownEvent event) { + Log.info("Unmounting"); + umount(); + Log.info("Unmounted"); + } + + @Override + public int statfs(String path, Statvfs stbuf) { + Log.info("statfs " + path); + try { + stbuf.f_frsize.set(blksize); + stbuf.f_bsize.set(blksize); + stbuf.f_blocks.set(1024 * 1024); // total data blocks in file system + stbuf.f_bfree.set(1024 * 1024); // free blocks in fs + stbuf.f_bavail.set(1024 * 1024); // avail blocks in fs + stbuf.f_files.set(1000); //FIXME: + stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_namemax.set(2048); + return super.statfs(path, stbuf); + } catch (Exception e) { + Log.error("When statfs " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int getattr(String path, FileStat stat) { + Thread.currentThread().setContextClassLoader(classLoader); + + Log.info("getattr " + path); + + if (path.equals("/")) { + stat.st_mode.set(FileStat.S_IFDIR | 0777); + stat.st_nlink.set(2); + return 0; + } + + try { + var fileOpt = fileService.open(path.substring(1)); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var uuid = fileOpt.get(); + + stat.st_mode.set(S_IFREG | 0755); + stat.st_nlink.set(1); + stat.st_size.set(fileService.size(uuid)); + // FIXME: Race? 
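+ // (The commented-out time fields below reference a `found` metadata record,
+ // apparently carried over from the full DHFS implementation, which crapfs does
+ // not have, so ctime/mtime/atime are left unset here.)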
+ // stat.st_ctim.tv_sec.set(found.get().ctime() / 1000); + // stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000); + // stat.st_mtim.tv_sec.set(found.get().mtime() / 1000); + // stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000); + // stat.st_atim.tv_sec.set(found.get().mtime() / 1000); + // stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); + stat.st_blksize.set(blksize); + } catch (Exception e) { + Log.error("When getattr " + path, e); + return -ErrorCodes.EIO(); + } catch (Throwable e) { + Log.error("When getattr " + path, e); + return -ErrorCodes.EIO(); + } + return 0; + } + + @Override + public int open(String path, FuseFileInfo fi) { + Thread.currentThread().setContextClassLoader(classLoader); + + Log.info("open " + path); + + if (path.equals("/")) return 0; + + try { + if (fileService.open(path.substring(1)).isEmpty()) return -ErrorCodes.ENOENT(); + return 0; + } catch (Exception e) { + Log.error("When open " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { + Thread.currentThread().setContextClassLoader(classLoader); + Log.info("read " + path + " " + size + " " + offset); + if (size < 0) return -ErrorCodes.EINVAL(); + if (offset < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path.substring(1)); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var read = fileService.read(fileOpt.get(), offset, (int) size); + if (read.isEmpty()) return 0; + buf.put(0, read.get().toByteArray(), 0, read.get().size()); + return read.get().size(); + } catch (Exception e) { + Log.error("When reading " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { + Thread.currentThread().setContextClassLoader(classLoader); + Log.info("write " + path + " " + size + " " + offset); + if (offset < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path.substring(1)); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var buffer = new byte[(int) size]; + + buf.get(0, buffer, 0, (int) size); + + var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); + return written.intValue(); + } catch (Exception e) { + Log.error("When writing " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int truncate(String path, long size) { + if (size < 0) return -ErrorCodes.EINVAL(); + try { + var ok = fileService.truncate(path.substring(1), size); + if (ok) + return 0; + else + return -ErrorCodes.ENOSPC(); + } catch (Exception e) { + Log.error("When truncating " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int create(String path, long mode, FuseFileInfo fi) { + Thread.currentThread().setContextClassLoader(classLoader); + try { + var ret = fileService.create(path.substring(1)); + if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); + else return 0; + } catch (Exception e) { + Log.error("When creating " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) { + Thread.currentThread().setContextClassLoader(classLoader); + + Log.info("readdir " + path); + + try { + Iterable found; + try { + found = fileService.readdir(path.substring(1)); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode())) + 
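+ // Only NOT_FOUND is mapped to a POSIX code here (ENOENT); any other
+ // status is rethrown and becomes EIO in the generic catch below.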
return -ErrorCodes.ENOENT(); + else throw e; + } + + filler.apply(buf, ".", null, 0); + filler.apply(buf, "..", null, 0); + + for (var c : found) { + filler.apply(buf, c, null, 0); + } + + return 0; + } catch (Exception e) { + Log.error("When readdir " + path, e); + return -ErrorCodes.EIO(); + } + } + +} diff --git a/dhfs-parent/crapfs/src/main/resources/application.properties b/dhfs-parent/crapfs/src/main/resources/application.properties new file mode 100644 index 00000000..493f31e3 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/resources/application.properties @@ -0,0 +1,22 @@ +quarkus.grpc.server.use-separate-server=false +dhfs.objects.reconnect_interval=5s +dhfs.objects.write_log=false +dhfs.fuse.root=${HOME}/dhfs_default/fuse +dhfs.files.target_chunk_size=262144 +# Writes strictly smaller than this will try to merge with blocks nearby +dhfs.files.write_merge_threshold=0.8 +# If a merge would result in a block of greater size than this, stop merging +dhfs.files.write_merge_limit=1.2 +# Don't take blocks of this size and above when merging +dhfs.files.write_merge_max_chunk_to_take=1 +dhfs.files.write_last_chunk_limit=1.5 +quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE +quarkus.log.category."com.usatiuk.dhfs".level=TRACE +quarkus.http.insecure-requests=enabled +quarkus.http.ssl.client-auth=required + +quarkus.hibernate-orm.database.generation=drop-and-create +quarkus.datasource.jdbc.url=jdbc:h2:file:${HOME}/dhfs_default/dhfsdb +quarkus.datasource.db-kind=h2 + +quarkus.hibernate-orm.cache."org.acme.files.objects.ChunkData".memory.object-count=500 \ No newline at end of file diff --git a/dhfs-parent/crapfs/src/main/resources/import.sql b/dhfs-parent/crapfs/src/main/resources/import.sql new file mode 100644 index 00000000..16aa5235 --- /dev/null +++ b/dhfs-parent/crapfs/src/main/resources/import.sql @@ -0,0 +1,6 @@ +-- This file allow to write SQL commands that will be emitted in test and dev. 
+-- The commands are commented as their support depends of the database +-- insert into myentity (id, field) values(1, 'field-1'); +-- insert into myentity (id, field) values(2, 'field-2'); +-- insert into myentity (id, field) values(3, 'field-3'); +-- alter sequence myentity_seq restart with 4; \ No newline at end of file diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index a54dda6e..c0f48f4d 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -19,6 +19,7 @@ utils objects-alloc objects-common + crapfs From e5949b7507be8ac7ced9288e8a73d1c812ffa879 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 7 Dec 2024 17:52:26 +0100 Subject: [PATCH 016/105] working crapfs --- .../acme/files/service/DhfsFileService.java | 1 + .../src/main/resources/application.properties | 7 ++-- dhfs-parent/objects/pom.xml | 10 ++--- .../usatiuk/dhfs/objects/JObjectManager.java | 37 ++++++++++++------- .../dhfs/objects/TransactionManager.java | 9 +++++ .../src/main/resources/META-INF/beans.xml | 0 6 files changed, 42 insertions(+), 22 deletions(-) create mode 100644 dhfs-parent/objects/src/main/resources/META-INF/beans.xml diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java index 9b7744a1..977dd4f2 100644 --- a/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java +++ b/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java @@ -72,6 +72,7 @@ public class DhfsFileService { return txm.run(() -> { var file = alloc.create(File.class, new JObjectKey(path)); + file.setChunks(new TreeMap<>()); curTx.putObject(file); return Optional.of(path); }); diff --git a/dhfs-parent/crapfs/src/main/resources/application.properties b/dhfs-parent/crapfs/src/main/resources/application.properties index 493f31e3..4181868b 100644 --- a/dhfs-parent/crapfs/src/main/resources/application.properties +++ b/dhfs-parent/crapfs/src/main/resources/application.properties @@ -10,13 +10,12 @@ dhfs.files.write_merge_limit=1.2 # Don't take blocks of this size and above when merging dhfs.files.write_merge_max_chunk_to_take=1 dhfs.files.write_last_chunk_limit=1.5 -quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE -quarkus.log.category."com.usatiuk.dhfs".level=TRACE +quarkus.log.category."com.usatiuk.dhfs".min-level=INFO +quarkus.log.category."com.usatiuk.dhfs".level=INFO quarkus.http.insecure-requests=enabled quarkus.http.ssl.client-auth=required - +dhfs.objects.persistence.files.root=${HOME}/dhfs_default/dhfsdb quarkus.hibernate-orm.database.generation=drop-and-create quarkus.datasource.jdbc.url=jdbc:h2:file:${HOME}/dhfs_default/dhfsdb quarkus.datasource.db-kind=h2 - quarkus.hibernate-orm.cache."org.acme.files.objects.ChunkData".memory.object-count=500 \ No newline at end of file diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 96e660a5..e20ad13e 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -80,11 +80,11 @@ objects-common 1.0-SNAPSHOT - - com.usatiuk - objects-common-deployment - 1.0-SNAPSHOT - + + + + + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index e7466e59..674454f5 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -13,6 +13,7 @@ import 
io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import java.io.Serializable; import java.lang.ref.Cleaner; import java.lang.ref.WeakReference; import java.util.*; @@ -85,7 +86,7 @@ public class JObjectManager { //noinspection unused try (var readLock = _storageReadLocker.lock(key)) { var read = objectStorage.readObject(key).orElse(null); - if (read == null) throw new IllegalArgumentException("Object not found: " + key); + if (read == null) return null; var got = objectSerializer.deserialize(read); @@ -168,6 +169,27 @@ public class JObjectManager { return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); } + // FIXME: + private static class SimpleTxManifest implements Serializable, TxManifest { + private final List _written; + private final List _deleted; + + public SimpleTxManifest(List written, List deleted) { + _written = written; + _deleted = deleted; + } + + @Override + public List getWritten() { + return _written; + } + + @Override + public List getDeleted() { + return _deleted; + } + } + public void commit(TransactionPrivate tx) { // This also holds the weak references var toUnlock = new LinkedList(); @@ -290,18 +312,7 @@ public class JObjectManager { Log.tracef("Committing transaction %d to storage", tx.getId()); - objectStorage.commitTx(new TxManifest() { - @Override - public List getWritten() { - // FIXME: - return written.stream().map(JData::getKey).toList(); - } - - @Override - public List getDeleted() { - return List.of(); - } - }); + objectStorage.commitTx(new SimpleTxManifest(written.stream().map(JData::getKey).toList(), Collections.emptyList())); } catch (Throwable t) { Log.error("Error when committing transaction", t); throw t; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index 39a3b9a2..8e9c061a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -13,6 +13,10 @@ public interface TransactionManager { void rollback(); default T run(Supplier supplier) { + if (current() != null) { + return supplier.get(); + } + begin(); try { var ret = supplier.get(); @@ -25,6 +29,11 @@ public interface TransactionManager { } default void run(VoidFn fn) { + if (current() != null) { + fn.apply(); + return; + } + begin(); try { fn.apply(); diff --git a/dhfs-parent/objects/src/main/resources/META-INF/beans.xml b/dhfs-parent/objects/src/main/resources/META-INF/beans.xml new file mode 100644 index 00000000..e69de29b From 14ba4b8e2eb64484c6b25748e0b0607b8fdb497f Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 18 Dec 2024 13:30:32 +0100 Subject: [PATCH 017/105] revert crapfs --- dhfs-parent/crapfs/pom.xml | 208 -------- dhfs-parent/crapfs/src/lombok.config | 1 - .../crapfs/src/main/docker/Dockerfile.jvm | 97 ---- .../src/main/docker/Dockerfile.legacy-jar | 93 ---- .../crapfs/src/main/docker/Dockerfile.native | 27 -- .../src/main/docker/Dockerfile.native-micro | 30 -- .../crapfs/src/main/java/org/acme/Main.java | 21 - .../org/acme/files/objects/ChunkData.java | 9 - .../java/org/acme/files/objects/File.java | 16 - .../acme/files/service/DhfsFileService.java | 444 ------------------ .../src/main/java/org/acme/fuse/DhfsFuse.java | 260 ---------- .../src/main/resources/application.properties | 21 - 
.../crapfs/src/main/resources/import.sql | 6 - dhfs-parent/pom.xml | 1 - 14 files changed, 1234 deletions(-) delete mode 100644 dhfs-parent/crapfs/pom.xml delete mode 100644 dhfs-parent/crapfs/src/lombok.config delete mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm delete mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar delete mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.native delete mode 100644 dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro delete mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/Main.java delete mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java delete mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java delete mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java delete mode 100644 dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java delete mode 100644 dhfs-parent/crapfs/src/main/resources/application.properties delete mode 100644 dhfs-parent/crapfs/src/main/resources/import.sql diff --git a/dhfs-parent/crapfs/pom.xml b/dhfs-parent/crapfs/pom.xml deleted file mode 100644 index d24db205..00000000 --- a/dhfs-parent/crapfs/pom.xml +++ /dev/null @@ -1,208 +0,0 @@ - - - 4.0.0 - crapfs - 1.0.0-SNAPSHOT - - - com.usatiuk.dhfs - parent - 1.0-SNAPSHOT - - - - - org.testcontainers - testcontainers - test - - - org.awaitility - awaitility - test - - - com.usatiuk - autoprotomap - 1.0-SNAPSHOT - - - com.usatiuk - autoprotomap-deployment - 1.0-SNAPSHOT - provided - - - org.bouncycastle - bcprov-jdk18on - 1.78.1 - - - org.bouncycastle - bcpkix-jdk18on - 1.78.1 - - - io.quarkus - quarkus-security - - - net.openhft - zero-allocation-hashing - - - io.quarkus - quarkus-grpc - - - io.quarkus - quarkus-arc - - - io.quarkus - quarkus-rest - - - io.quarkus - quarkus-rest-client - - - io.quarkus - quarkus-rest-client-jsonb - - - io.quarkus - quarkus-rest-jsonb - - - io.quarkus - quarkus-scheduler - - - io.quarkus - quarkus-junit5 - test - - - org.projectlombok - lombok - provided - - - com.github.SerCeMan - jnr-fuse - 44ed40f8ce - - - com.github.jnr - jnr-ffi - 2.2.16 - - - com.github.jnr - jnr-posix - 3.1.19 - - - com.github.jnr - jnr-constants - 0.10.4 - - - org.apache.commons - commons-lang3 - - - commons-io - commons-io - - - org.jboss.slf4j - slf4j-jboss-logmanager - test - - - commons-codec - commons-codec - - - org.apache.commons - commons-collections4 - - - org.apache.commons - commons-math3 - 3.6.1 - - - com.usatiuk - kleppmanntree - 1.0-SNAPSHOT - - - com.usatiuk.dhfs - supportlib - 1.0-SNAPSHOT - - - com.usatiuk.dhfs - objects - 1.0-SNAPSHOT - - - com.usatiuk.dhfs - utils - 1.0-SNAPSHOT - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - 1C - false - classes - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - - true - - - concurrent - - - 0.5 - - true - true - - - - - ${quarkus.platform.group-id} - quarkus-maven-plugin - ${quarkus.platform.version} - true - - - quarkus-plugin - - build - generate-code - generate-code-tests - - - - - - - diff --git a/dhfs-parent/crapfs/src/lombok.config b/dhfs-parent/crapfs/src/lombok.config deleted file mode 100644 index f1c474ce..00000000 --- a/dhfs-parent/crapfs/src/lombok.config +++ /dev/null @@ -1 +0,0 @@ -lombok.accessors.prefix += _ diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm b/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm deleted file mode 100644 index 2c7432c7..00000000 --- a/dhfs-parent/crapfs/src/main/docker/Dockerfile.jvm +++ 
/dev/null @@ -1,97 +0,0 @@ -#### -# This Dockerfile is used to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/crapfs-jvm . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm -# -# If you want to include the debug port in your docker image -# you will have to expose the debug port (5005 is the default) like this: EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This script computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User-specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a container's restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50`, which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0`, in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25`, which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0`, in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB, which means the calculated value of `-Xms` will never -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example: -# "true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt.
(example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-21:1.20 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] - diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index fcc2aef7..00000000 --- a/dhfs-parent/crapfs/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,93 +0,0 @@ -#### -# This Dockerfile is used to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.jar.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/crapfs-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar -# -# If you want to include the debug port in your docker image -# you will have to expose the debug port (5005 is the default) like this: EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This script computes the command line to execute your Java application, and -# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User-specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a container's restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50`, which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0`, in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25`, which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0`, in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB, which means the calculated value of `-Xms` will never -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example: -# "true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy.
(example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-21:1.20 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native deleted file mode 100644 index 4e549443..00000000 --- a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/crapfs . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro b/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index d3b97ce7..00000000 --- a/dhfs-parent/crapfs/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/crapfs . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/crapfs -# -### -FROM quay.io/quarkus/quarkus-micro-image:2.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/Main.java b/dhfs-parent/crapfs/src/main/java/org/acme/Main.java deleted file mode 100644 index 9afa3a9a..00000000 --- a/dhfs-parent/crapfs/src/main/java/org/acme/Main.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.acme; - -import io.quarkus.runtime.Quarkus; -import io.quarkus.runtime.QuarkusApplication; -import io.quarkus.runtime.annotations.QuarkusMain; - -@QuarkusMain -public class Main { - public static void main(String... args) { - Quarkus.run(CrapfsServerApp.class, args); - } - - public static class CrapfsServerApp implements QuarkusApplication { - - @Override - public int run(String... 
args) throws Exception { - Quarkus.waitForExit(); - return 0; - } - } -} \ No newline at end of file diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java deleted file mode 100644 index 4803cad5..00000000 --- a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/ChunkData.java +++ /dev/null @@ -1,9 +0,0 @@ -package org.acme.files.objects; - -import com.usatiuk.objects.common.runtime.JData; - -public interface ChunkData extends JData { - byte[] getData(); - - void setData(byte[] data); -} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java deleted file mode 100644 index cc3ebeef..00000000 --- a/dhfs-parent/crapfs/src/main/java/org/acme/files/objects/File.java +++ /dev/null @@ -1,16 +0,0 @@ -package org.acme.files.objects; - -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - -import java.util.NavigableMap; - -public interface File extends JData { - NavigableMap getChunks(); - - void setChunks(NavigableMap chunk); - - long getSize(); - - void setSize(long size); -} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java b/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java deleted file mode 100644 index 977dd4f2..00000000 --- a/dhfs-parent/crapfs/src/main/java/org/acme/files/service/DhfsFileService.java +++ /dev/null @@ -1,444 +0,0 @@ -package org.acme.files.service; - -import com.google.protobuf.ByteString; -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.objects.TransactionManager; -import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import com.usatiuk.objects.common.runtime.JObjectKey; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.acme.files.objects.ChunkData; -import org.acme.files.objects.File; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; - -@ApplicationScoped -public class DhfsFileService { - - @ConfigProperty(name = "dhfs.files.target_chunk_size") - int targetChunkSize; - - @ConfigProperty(name = "dhfs.files.write_merge_threshold") - float writeMergeThreshold; - - @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take") - float writeMergeMaxChunkToTake; - - @ConfigProperty(name = "dhfs.files.write_merge_limit") - float writeMergeLimit; - - @ConfigProperty(name = "dhfs.files.write_last_chunk_limit") - float writeLastChunkLimit; - - @ConfigProperty(name = "dhfs.objects.write_log") - boolean writeLogging; - - @Inject - Transaction curTx; - - @Inject - TransactionManager txm; - - @Inject - ObjectAllocator alloc; - - long chunkCounter = 0; - - void init(@Observes @Priority(500) StartupEvent event) { - Log.info("Initializing file service"); - } - - public Optional open(String path) { - return txm.run(() -> { - if (curTx.getObject(File.class, new JObjectKey(path)).orElse(null) != null) { - return Optional.of(path); - } - return Optional.empty(); - }); - } - - public Optional create(String path) { - if (path.contains("/")) { - throw new 
StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Path should not contain slashes")); - } - - return txm.run(() -> { - var file = alloc.create(File.class, new JObjectKey(path)); - file.setChunks(new TreeMap<>()); - curTx.putObject(file); - return Optional.of(path); - }); - } - - private JObjectKey createChunk(ByteString bytes) { - var cd = alloc.create(ChunkData.class, new JObjectKey("chunk-" + chunkCounter++)); - cd.setData(bytes.toByteArray()); - curTx.putObject(cd); - return cd.getKey(); - } - - private ByteString readChunk(JObjectKey uuid) { - var chunk = curTx.getObject(ChunkData.class, uuid); - if (chunk.isEmpty()) { - Log.error("Chunk not found when trying to read: " + uuid); - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Chunk not found")); - } - return UnsafeByteOperations.unsafeWrap(chunk.get().getData()); - } - - private static final List fileNames = List.of("file1", "file2"); - - public List readdir(String path) { - if (!path.equals("")) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Only root directory is supported")); - } - - return txm.run(() -> { - var ret = new ArrayList(); - for (String fileName : fileNames) { - var got = curTx.getObject(File.class, new JObjectKey(fileName)); - if (got.isPresent()) { - ret.add(fileName); - } - } - return ret; - }); - } - - public Optional read(String fileUuid, long offset, int length) { - if (length < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - if (offset < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - - return txm.run(() -> { - var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); - return Optional.empty(); - } - - try { - var chunksAll = new TreeMap<>(file.getChunks()); - if (chunksAll.isEmpty()) { - return Optional.of(ByteString.empty()); - } - var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); - - if (chunksList.isEmpty()) { - return Optional.of(ByteString.empty()); - } - - var chunks = chunksList.iterator(); - ByteString buf = ByteString.empty(); - - long curPos = offset; - var chunk = chunks.next(); - - while (curPos < offset + length) { - var chunkPos = chunk.getKey(); - - long offInChunk = curPos - chunkPos; - - long toReadInChunk = (offset + length) - curPos; - - var chunkBytes = readChunk(chunk.getValue()); - - long readableLen = chunkBytes.size() - offInChunk; - - var toReadReally = Math.min(readableLen, toReadInChunk); - - if (toReadReally < 0) break; - - buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); - - curPos += toReadReally; - - if (readableLen > toReadInChunk) - break; - - if (!chunks.hasNext()) break; - - chunk = chunks.next(); - } - - // FIXME: - return Optional.of(buf); - } catch (Exception e) { - Log.error("Error reading file: " + fileUuid, e); - return Optional.empty(); - } - }); - } - - - public Long write(String fileUuid, long offset, ByteString data) { - if (offset < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - - return txm.run(() -> { - // FIXME: - var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); - return -1L; - 
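The read path above finds the first relevant chunk with floorKey and then walks forward through the tail map. The same offset-keyed-map technique as a standalone, runnable sketch (names and the byte[] chunk payloads are illustrative; the real code stores JObjectKey references and resolves ChunkData through the transaction):

    import java.io.ByteArrayOutputStream;
    import java.util.NavigableMap;

    // Sketch: chunks keyed by their starting offset within the file.
    class ChunkMapReadSketch {
        static byte[] read(NavigableMap<Long, byte[]> chunks, long offset, int length) {
            var out = new ByteArrayOutputStream();
            Long first = chunks.floorKey(offset); // chunk containing (or preceding) offset
            if (first == null) return out.toByteArray();
            for (var e : chunks.tailMap(first).entrySet()) {
                long curPos = offset + out.size();
                if (curPos >= offset + length) break;
                long offInChunk = curPos - e.getKey();
                if (offInChunk < 0) break;            // sparse gap; not handled in this sketch
                byte[] data = e.getValue();
                if (offInChunk >= data.length) break; // short chunk, nothing left to take
                int take = (int) Math.min(data.length - offInChunk, offset + length - curPos);
                out.write(data, (int) offInChunk, take);
            }
            return out.toByteArray();
        }
    }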
} - - if (size(fileUuid) < offset) - truncate(fileUuid, offset); - - // Get chunk ids from the database - var chunksAll = file.getChunks(); - var first = chunksAll.floorEntry(offset); - var last = chunksAll.lowerEntry(offset + data.size()); - NavigableMap removedChunks = new TreeMap<>(); - - long start = 0; - - NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); - NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); - - if (first != null && readChunk(first.getValue()).size() + first.getKey() <= offset) { - beforeFirst = chunksAll; - afterLast = Collections.emptyNavigableMap(); - first = null; - last = null; - start = offset; - } else if (!chunksAll.isEmpty()) { - var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); - removedChunks.putAll(between); - start = first.getKey(); - } - - ByteString pendingWrites = ByteString.empty(); - - if (first != null && first.getKey() < offset) { - var chunkBytes = readChunk(first.getValue()); - pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); - } - pendingWrites = pendingWrites.concat(data); - - if (last != null) { - var lchunkBytes = readChunk(last.getValue()); - if (last.getKey() + lchunkBytes.size() > offset + data.size()) { - var startInFile = offset + data.size(); - var startInChunk = startInFile - last.getKey(); - pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); - } - } - - int combinedSize = pendingWrites.size(); - - if (targetChunkSize > 0) { - if (combinedSize < (targetChunkSize * writeMergeThreshold)) { - boolean leftDone = false; - boolean rightDone = false; - while (!leftDone && !rightDone) { - if (beforeFirst.isEmpty()) leftDone = true; - if (!beforeFirst.isEmpty() || !leftDone) { - var takeLeft = beforeFirst.lastEntry(); - - var cuuid = takeLeft.getValue(); - - if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) { - leftDone = true; - continue; - } - - if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) { - leftDone = true; - continue; - } - - // FIXME: (and test this) - beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); - start = takeLeft.getKey(); - pendingWrites = readChunk(cuuid).concat(pendingWrites); - combinedSize += readChunk(cuuid).size(); - removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); - } - if (afterLast.isEmpty()) rightDone = true; - if (!afterLast.isEmpty() && !rightDone) { - var takeRight = afterLast.firstEntry(); - - var cuuid = takeRight.getValue(); - - if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) { - rightDone = true; - continue; - } - - if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) { - rightDone = true; - continue; - } - - // FIXME: (and test this) - afterLast = afterLast.tailMap(takeRight.getKey(), false); - pendingWrites = pendingWrites.concat(readChunk(cuuid)); - combinedSize += readChunk(cuuid).size(); - removedChunks.put(takeRight.getKey(), takeRight.getValue()); - } - } - } - } - - NavigableMap newChunks = new TreeMap<>(); - - { - int cur = 0; - while (cur < combinedSize) { - int end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { - end = Math.min(cur + targetChunkSize, combinedSize); - } else { - end = combinedSize; - } - } - - var thisChunk 
= pendingWrites.substring(cur, end); - - newChunks.put(start, createChunk(thisChunk)); - - start += thisChunk.size(); - cur = end; - } - } - - var newChunksMap = new TreeMap<>(chunksAll); - - for (var e : removedChunks.entrySet()) { - newChunksMap.remove(e.getKey()); -// em.remove(em.getReference(ChunkData.class, e.getValue())); - } - - newChunksMap.putAll(newChunks); - - file.setChunks(newChunksMap); - - updateFileSize(file); - - return (long) data.size(); - }); - } - - public Boolean truncate(String fileUuid, long length) { - if (length < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - - return txm.run(() -> { - var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); - return false; - } - - if (length == 0) { - file.setChunks(new TreeMap<>()); - updateFileSize(file); - return true; - } - - var curSize = size(fileUuid); - if (curSize == length) return true; - - var chunksAll = file.getChunks(); - NavigableMap removedChunks = new TreeMap<>(); - NavigableMap newChunks = new TreeMap<>(); - - if (curSize < length) { - long combinedSize = (length - curSize); - - long start = curSize; - - // Hack - HashMap zeroCache = new HashMap<>(); - - { - long cur = 0; - while (cur < combinedSize) { - long end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * 1.5)) { - end = cur + targetChunkSize; - } else { - end = combinedSize; - } - } - - if (!zeroCache.containsKey(end - cur)) - zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); - - newChunks.put(start, createChunk(zeroCache.get(end - cur))); - start += (end - cur); - cur = end; - } - } - } else { - var tail = chunksAll.lowerEntry(length); - var afterTail = chunksAll.tailMap(tail.getKey(), false); - - removedChunks.put(tail.getKey(), tail.getValue()); - removedChunks.putAll(afterTail); - - var tailBytes = readChunk(tail.getValue()); - var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); - - newChunks.put(tail.getKey(), createChunk(newChunk)); - } - - var newChunkMap = new TreeMap<>(chunksAll); - - for (var e : removedChunks.entrySet()) { - newChunkMap.remove(e.getKey()); -// em.remove(em.getReference(ChunkData.class, e.getValue())); - } - newChunkMap.putAll(newChunks); - - file.setChunks(newChunkMap); - - updateFileSize(file); - return true; - }); - } - - public void updateFileSize(File file) { - long realSize = 0; - - var last = file.getChunks().lastEntry(); - if (last != null) { - var lastSize = readChunk(last.getValue()).size(); - realSize = last.getKey() + lastSize; - } - - if (realSize != file.getSize()) { - file.setSize(realSize); - } - } - - public Long size(String uuid) { - return txm.run(() -> { - var file = curTx.getObject(File.class, new JObjectKey(uuid)).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + uuid); - return -1L; - } - return file.getSize(); - }); - } -} diff --git a/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java b/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java deleted file mode 100644 index cae0662d..00000000 --- a/dhfs-parent/crapfs/src/main/java/org/acme/fuse/DhfsFuse.java +++ /dev/null @@ -1,260 +0,0 @@ -package org.acme.fuse; - -import com.google.protobuf.UnsafeByteOperations; -import com.sun.security.auth.module.UnixSystem; -import io.grpc.Status; 
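The splitting loop at the end of write() above cuts the merged byte range back into chunks of roughly targetChunkSize, letting the final chunk grow up to writeLastChunkLimit times the target so a write never leaves a tiny trailing chunk. The rule in isolation, as a runnable sketch (argument names are illustrative; the real code works on ByteString and also emits a single chunk when targetChunkSize <= 0):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class ChunkSplitSketch {
        // Cut `combined` into ~target-sized chunks; the final chunk may reach
        // target * lastChunkLimit bytes instead of being split into a tiny tail.
        static List<byte[]> split(byte[] combined, int target, double lastChunkLimit) {
            var out = new ArrayList<byte[]>();
            int cur = 0;
            while (cur < combined.length) {
                int end = (combined.length - cur) > target * lastChunkLimit
                        ? Math.min(cur + target, combined.length)
                        : combined.length; // remainder is small enough: take it all
                out.add(Arrays.copyOfRange(combined, cur, end));
                cur = end;
            }
            return out;
        }
    }

With target = 262144 and lastChunkLimit = 1.5 (the values in the application.properties below), a 400 KiB write becomes one 256 KiB chunk plus one 144 KiB chunk, while a 380 KiB write stays a single chunk.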
-import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import jnr.ffi.Pointer; -import org.acme.files.service.DhfsFileService; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import ru.serce.jnrfuse.ErrorCodes; -import ru.serce.jnrfuse.FuseFillDir; -import ru.serce.jnrfuse.FuseStubFS; -import ru.serce.jnrfuse.struct.FileStat; -import ru.serce.jnrfuse.struct.FuseFileInfo; -import ru.serce.jnrfuse.struct.Statvfs; - -import java.nio.file.Paths; -import java.util.ArrayList; - -import static jnr.posix.FileStat.S_IFREG; - -@ApplicationScoped -public class DhfsFuse extends FuseStubFS { - private static final int blksize = 1048576; - private static final int iosize = 1048576; - - @ConfigProperty(name = "dhfs.fuse.root") - String root; - - @ConfigProperty(name = "dhfs.files.target_chunk_size") - int targetChunkSize; - - @Inject - DhfsFileService fileService; - - ClassLoader classLoader; - - void init(@Observes @Priority(100000) StartupEvent event) { - classLoader = Thread.currentThread().getContextClassLoader(); - - Paths.get(root).toFile().mkdirs(); - Log.info("Mounting with root " + root); - - var uid = new UnixSystem().getUid(); - var gid = new UnixSystem().getGid(); - - var opts = new ArrayList(); - - // Assuming macFuse -// if (SystemUtils.IS_OS_MAC) { - opts.add("-o"); - opts.add("iosize=" + iosize); -// } else if (SystemUtils.IS_OS_LINUX) { -// // FIXME: There's something else missing: the writes still seem to be 32k max -//// opts.add("-o"); -//// opts.add("large_read"); -// opts.add("-o"); -// opts.add("big_writes"); -// opts.add("-o"); -// opts.add("max_read=" + iosize); -// opts.add("-o"); -// opts.add("max_write=" + iosize); -// } - opts.add("-o"); - opts.add("auto_cache"); - opts.add("-o"); - opts.add("uid=" + uid); - opts.add("-o"); - opts.add("gid=" + gid); - - mount(Paths.get(root), false, false, opts.toArray(String[]::new)); - } - - void shutdown(@Observes @Priority(1) ShutdownEvent event) { - Log.info("Unmounting"); - umount(); - Log.info("Unmounted"); - } - - @Override - public int statfs(String path, Statvfs stbuf) { - Log.info("statfs " + path); - try { - stbuf.f_frsize.set(blksize); - stbuf.f_bsize.set(blksize); - stbuf.f_blocks.set(1024 * 1024); // total data blocks in file system - stbuf.f_bfree.set(1024 * 1024); // free blocks in fs - stbuf.f_bavail.set(1024 * 1024); // avail blocks in fs - stbuf.f_files.set(1000); //FIXME: - stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: - stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: - stbuf.f_namemax.set(2048); - return super.statfs(path, stbuf); - } catch (Exception e) { - Log.error("When statfs " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int getattr(String path, FileStat stat) { - Thread.currentThread().setContextClassLoader(classLoader); - - Log.info("getattr " + path); - - if (path.equals("/")) { - stat.st_mode.set(FileStat.S_IFDIR | 0777); - stat.st_nlink.set(2); - return 0; - } - - try { - var fileOpt = fileService.open(path.substring(1)); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var uuid = fileOpt.get(); - - stat.st_mode.set(S_IFREG | 0755); - stat.st_nlink.set(1); - stat.st_size.set(fileService.size(uuid)); - // FIXME: Race? 
- // stat.st_ctim.tv_sec.set(found.get().ctime() / 1000); - // stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000); - // stat.st_mtim.tv_sec.set(found.get().mtime() / 1000); - // stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000); - // stat.st_atim.tv_sec.set(found.get().mtime() / 1000); - // stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); - stat.st_blksize.set(blksize); - } catch (Exception e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); - } catch (Throwable e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); - } - return 0; - } - - @Override - public int open(String path, FuseFileInfo fi) { - Thread.currentThread().setContextClassLoader(classLoader); - - Log.info("open " + path); - - if (path.equals("/")) return 0; - - try { - if (fileService.open(path.substring(1)).isEmpty()) return -ErrorCodes.ENOENT(); - return 0; - } catch (Exception e) { - Log.error("When open " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { - Thread.currentThread().setContextClassLoader(classLoader); - Log.info("read " + path + " " + size + " " + offset); - if (size < 0) return -ErrorCodes.EINVAL(); - if (offset < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path.substring(1)); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var file = fileOpt.get(); - var read = fileService.read(fileOpt.get(), offset, (int) size); - if (read.isEmpty()) return 0; - buf.put(0, read.get().toByteArray(), 0, read.get().size()); - return read.get().size(); - } catch (Exception e) { - Log.error("When reading " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { - Thread.currentThread().setContextClassLoader(classLoader); - Log.info("write " + path + " " + size + " " + offset); - if (offset < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path.substring(1)); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var buffer = new byte[(int) size]; - - buf.get(0, buffer, 0, (int) size); - - var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); - return written.intValue(); - } catch (Exception e) { - Log.error("When writing " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int truncate(String path, long size) { - if (size < 0) return -ErrorCodes.EINVAL(); - try { - var ok = fileService.truncate(path.substring(1), size); - if (ok) - return 0; - else - return -ErrorCodes.ENOSPC(); - } catch (Exception e) { - Log.error("When truncating " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int create(String path, long mode, FuseFileInfo fi) { - Thread.currentThread().setContextClassLoader(classLoader); - try { - var ret = fileService.create(path.substring(1)); - if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); - else return 0; - } catch (Exception e) { - Log.error("When creating " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) { - Thread.currentThread().setContextClassLoader(classLoader); - - Log.info("readdir " + path); - - try { - Iterable found; - try { - found = fileService.readdir(path.substring(1)); - } catch (StatusRuntimeException e) { - if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode())) - 
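The read() and write() callbacks above marshal all data through raw jnr-ffi Pointers supplied by FUSE. The copy pattern in isolation (a sketch; Pointer.put and Pointer.get are the same calls the code above uses):

    import jnr.ffi.Pointer;

    class FuseBufferSketch {
        // FUSE read callback: copy our bytes into the kernel-supplied buffer.
        static int fillReadBuffer(Pointer buf, byte[] data) {
            buf.put(0, data, 0, data.length);
            return data.length; // FUSE expects the number of bytes produced
        }

        // FUSE write callback: copy the kernel-supplied buffer onto the heap.
        static byte[] drainWriteBuffer(Pointer buf, long size) {
            var bytes = new byte[(int) size]; // FUSE write sizes fit in an int in practice
            buf.get(0, bytes, 0, (int) size);
            return bytes;
        }
    }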
return -ErrorCodes.ENOENT(); - else throw e; - } - - filler.apply(buf, ".", null, 0); - filler.apply(buf, "..", null, 0); - - for (var c : found) { - filler.apply(buf, c, null, 0); - } - - return 0; - } catch (Exception e) { - Log.error("When readdir " + path, e); - return -ErrorCodes.EIO(); - } - } - -} diff --git a/dhfs-parent/crapfs/src/main/resources/application.properties b/dhfs-parent/crapfs/src/main/resources/application.properties deleted file mode 100644 index 4181868b..00000000 --- a/dhfs-parent/crapfs/src/main/resources/application.properties +++ /dev/null @@ -1,21 +0,0 @@ -quarkus.grpc.server.use-separate-server=false -dhfs.objects.reconnect_interval=5s -dhfs.objects.write_log=false -dhfs.fuse.root=${HOME}/dhfs_default/fuse -dhfs.files.target_chunk_size=262144 -# Writes strictly smaller than this will try to merge with blocks nearby -dhfs.files.write_merge_threshold=0.8 -# If a merge would result in a block of greater size than this, stop merging -dhfs.files.write_merge_limit=1.2 -# Don't take blocks of this size and above when merging -dhfs.files.write_merge_max_chunk_to_take=1 -dhfs.files.write_last_chunk_limit=1.5 -quarkus.log.category."com.usatiuk.dhfs".min-level=INFO -quarkus.log.category."com.usatiuk.dhfs".level=INFO -quarkus.http.insecure-requests=enabled -quarkus.http.ssl.client-auth=required -dhfs.objects.persistence.files.root=${HOME}/dhfs_default/dhfsdb -quarkus.hibernate-orm.database.generation=drop-and-create -quarkus.datasource.jdbc.url=jdbc:h2:file:${HOME}/dhfs_default/dhfsdb -quarkus.datasource.db-kind=h2 -quarkus.hibernate-orm.cache."org.acme.files.objects.ChunkData".memory.object-count=500 \ No newline at end of file diff --git a/dhfs-parent/crapfs/src/main/resources/import.sql b/dhfs-parent/crapfs/src/main/resources/import.sql deleted file mode 100644 index 16aa5235..00000000 --- a/dhfs-parent/crapfs/src/main/resources/import.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file allow to write SQL commands that will be emitted in test and dev. 
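Plugging the numbers from the application.properties above into the write-merge heuristics in DhfsFileService: target_chunk_size = 262144 bytes (256 KiB), so writes smaller than 0.8 x 256 KiB, about 205 KiB, try to merge with neighbouring chunks; merging stops once the combined block would exceed 1.2 x 256 KiB, about 307 KiB; neighbour chunks of 256 KiB (factor 1) or larger are never absorbed; and the last chunk of a write may reach 1.5 x 256 KiB = 384 KiB before the split loop cuts it.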
--- The commands are commented as their support depends of the database --- insert into myentity (id, field) values(1, 'field-1'); --- insert into myentity (id, field) values(2, 'field-2'); --- insert into myentity (id, field) values(3, 'field-3'); --- alter sequence myentity_seq restart with 4; \ No newline at end of file diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index c0f48f4d..a54dda6e 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -19,7 +19,6 @@ utils objects-alloc objects-common - crapfs From 6da2e43cee1aa7550d3451ba08277d40de960a7f Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 28 Dec 2024 18:40:21 +0100 Subject: [PATCH 018/105] somewhat working fs --- .../com/usatiuk/kleppmanntree/LogEffect.java | 4 +- .../usatiuk/kleppmanntree/LogEffectOld.java | 5 +- .../com/usatiuk/kleppmanntree/LogRecord.java | 4 +- .../com/usatiuk/kleppmanntree/OpMove.java | 5 +- .../com/usatiuk/kleppmanntree/TreeNode.java | 3 +- .../deployment/ObjectsAllocProcessor.java | 3 +- .../dhfs/objects/TransactionManager.java | 7 + dhfs-parent/server-old/.dockerignore | 5 + dhfs-parent/server-old/.gitignore | 43 + dhfs-parent/server-old/Dockerfile | 2 + dhfs-parent/server-old/docker-compose.yml | 42 + dhfs-parent/server-old/pom.xml | 209 +++++ dhfs-parent/server-old/src/lombok.config | 1 + .../server-old/src/main/docker/Dockerfile.jvm | 97 ++ .../src/main/docker/Dockerfile.legacy-jar | 93 ++ .../src/main/docker/Dockerfile.native | 27 + .../src/main/docker/Dockerfile.native-micro | 30 + .../src/main/java/DeadlockDetector.java | 63 ++ .../src/main/java/com/usatiuk/dhfs/Main.java | 21 + .../com/usatiuk/dhfs/ShutdownChecker.java | 42 + .../conflicts/DirectoryConflictResolver.java | 0 .../files/conflicts/FileConflictResolver.java | 0 .../files/conflicts/NoOpConflictResolver.java | 0 .../usatiuk/dhfs/files/objects/ChunkData.java | 90 ++ .../files/objects/ChunkDataSerializer.java | 0 .../usatiuk/dhfs/files/objects/Directory.java | 0 .../files/objects/DirectorySerializer.java | 0 .../com/usatiuk/dhfs/files/objects/File.java | 51 ++ .../dhfs/files/objects/FileSerializer.java | 0 .../usatiuk/dhfs/files/objects/FsNode.java | 43 + .../dhfs/files/service/DhfsFileService.java | 51 ++ .../files/service/DhfsFileServiceImpl.java | 814 +++++++++++++++++ .../service/DirectoryNotEmptyException.java | 8 + .../dhfs/files/service/FileChunkMutator.java | 0 .../dhfs/files/service/GetattrRes.java | 4 + .../dhfs/files/service/GetattrType.java | 7 + .../java/com/usatiuk/dhfs/fuse/DhfsFuse.java | 391 ++++++++ .../usatiuk/dhfs/fuse/JnrPtrByteOutput.java | 64 ++ .../dhfs/fuse/JnrPtrByteOutputAccessors.java | 24 + .../jkleppmanntree/JKleppmannTreeManager.java | 566 ++++++++++++ .../JKleppmannTreeNodeWrapper.java | 71 ++ .../JKleppmannTreeOpWrapper.java | 30 + .../JKleppmannTreePeerInterface.java | 25 + .../JKleppmannTreePeriodicPushOp.java | 25 + .../JKleppmannTreeLogEffectSerializer.java | 0 .../JKleppmannTreeNodeProtoSerializer.java | 0 .../JKleppmannTreeOpProtoSerializer.java | 0 ...mannTreePeriodicPushOpProtoSerializer.java | 0 ...mannTreePersistentDataProtoSerializer.java | 0 .../structs/JKleppmannTreeNode.java | 45 + .../structs/JKleppmannTreeNodeMeta.java | 31 + .../JKleppmannTreeNodeMetaDirectory.java | 16 + .../structs/JKleppmannTreeNodeMetaFile.java | 37 + .../structs/JKleppmannTreePersistentData.java | 88 ++ .../objects/jrepository/AssumedUnique.java | 0 .../DeletedObjectAccessException.java | 0 .../dhfs/objects/jrepository/JMutator.java | 0 .../dhfs/objects/jrepository/JObject.java | 0 
.../dhfs/objects/jrepository/JObjectData.java | 0 .../dhfs/objects/jrepository/JObjectKey.java | 0 .../dhfs/objects/jrepository/JObjectLRU.java | 0 .../objects/jrepository/JObjectManager.java | 0 .../jrepository/JObjectManagerImpl.java | 0 .../jrepository/JObjectRefProcessor.java | 0 .../objects/jrepository/JObjectSnapshot.java | 0 .../objects/jrepository/JObjectTxManager.java | 0 .../dhfs/objects/jrepository/Leaf.java | 0 .../objects/jrepository/ObjectMetadata.java | 0 .../jrepository/ObjectMetadataSerializer.java | 0 .../dhfs/objects/jrepository/OnlyLocal.java | 0 .../objects/jrepository/PushResolution.java | 0 .../dhfs/objects/jrepository/SoftJObject.java | 0 .../jrepository/SoftJObjectFactory.java | 0 .../dhfs/objects/jrepository/TxBundle.java | 0 .../dhfs/objects/jrepository/TxWriteback.java | 0 .../objects/jrepository/TxWritebackImpl.java | 0 .../objects/repository/CertificateTools.java | 0 .../objects/repository/ConflictResolver.java | 0 .../dhfs/objects/repository/PeerManager.java | 0 .../repository/PersistentPeerDataService.java | 0 .../repository/PersistentRemoteHosts.java | 0 .../repository/PersistentRemoteHostsData.java | 0 .../repository/RemoteObjectServiceClient.java | 0 .../repository/RemoteObjectServiceServer.java | 0 .../objects/repository/RpcChannelFactory.java | 0 .../objects/repository/RpcClientFactory.java | 0 .../dhfs/objects/repository/SyncHandler.java | 0 .../repository/TransientPeerState.java | 0 .../repository/TransientPeersState.java | 0 .../repository/TransientPeersStateData.java | 0 .../autosync/AutoSyncProcessor.java | 0 .../DeferredInvalidationQueueData.java | 0 .../DeferredInvalidationQueueService.java | 0 .../invalidation/InvalidationQueue.java | 0 .../InvalidationQueueService.java | 0 .../dhfs/objects/repository/opsupport/Op.java | 0 .../repository/opsupport/OpObject.java | 0 .../opsupport/OpObjectRegistry.java | 0 .../repository/opsupport/OpSender.java | 0 .../LocalPeerDiscoveryBroadcaster.java | 0 .../LocalPeerDiscoveryClient.java | 0 .../repository/peersync/PeerDirectory.java | 0 .../PeerDirectoryConflictResolver.java | 0 .../peersync/PeerDirectoryLocal.java | 0 .../PeerDirectoryLocalSerializer.java | 0 .../peersync/PeerDirectorySerializer.java | 0 .../objects/repository/peersync/PeerInfo.java | 0 .../repository/peersync/PeerSyncApi.java | 0 .../peersync/PeerSyncApiClient.java | 0 .../peersync/PeerSyncApiClientDynamic.java | 0 .../peersync/PersistentPeerInfo.java | 0 .../PersistentPeerInfoSerializer.java | 0 .../peertrust/PeerRolesAugmentor.java | 0 .../peertrust/PeerTrustManager.java | 0 .../peertrust/PeerTrustServerCustomizer.java | 0 .../FileObjectPersistentStore.java | 0 .../persistence/ObjectPersistentStore.java | 0 .../repository/persistence/TxManifest.java | 0 .../repository/webapi/AvailablePeerInfo.java | 0 .../repository/webapi/KnownPeerDelete.java | 0 .../repository/webapi/KnownPeerInfo.java | 0 .../repository/webapi/KnownPeerPut.java | 0 .../repository/webapi/ManagementApi.java | 0 .../com/usatiuk/dhfs/webui/WebUiRouter.java | 54 ++ .../proto/dhfs_objects_peer_discovery.proto | 13 + .../src/main/proto/dhfs_objects_serial.proto | 155 ++++ .../src/main/proto/dhfs_objects_sync.proto | 102 +++ .../src/main/resources/application.properties | 46 + .../com/usatiuk/dhfs/TempDataProfile.java | 29 + .../com/usatiuk/dhfs/TestDataCleaner.java | 44 + .../usatiuk/dhfs/benchmarks/Benchmarker.java | 83 ++ .../benchmarks/DhfsFileBenchmarkTest.java | 52 ++ .../dhfs/files/DhfsFileServiceSimpleTest.java | 9 + .../files/DhfsFileServiceSimpleTestImpl.java | 288 
++++++ ...fsFileServiceSimpleTestNoChunkingTest.java | 9 + ...ileServiceSimpleTestSmallChunkingTest.java | 9 + .../com/usatiuk/dhfs/fuse/DhfsFuseTest.java | 77 ++ .../usatiuk/dhfs/integration/DhfsFuseIT.java | 352 ++++++++ .../dhfs/integration/DhfsFusex3IT.java | 293 ++++++ .../usatiuk/dhfs/integration/DhfsImage.java | 93 ++ .../usatiuk/dhfs/integration/ResyncIT.java | 135 +++ .../FileObjectPersistentStoreTest.java | 95 ++ .../persistence/ProtoSerializationTest.java | 24 + .../src/test/resources/application.properties | 11 + .../usatiuk/dhfs/files/objects/ChunkData.java | 88 +- .../com/usatiuk/dhfs/files/objects/File.java | 50 +- .../usatiuk/dhfs/files/objects/FsNode.java | 44 +- .../dhfs/files/service/DhfsFileService.java | 32 +- .../files/service/DhfsFileServiceImpl.java | 836 ++++++++---------- .../java/com/usatiuk/dhfs/fuse/DhfsFuse.java | 10 +- .../jkleppmanntree/JKleppmannTreeManager.java | 652 ++++++-------- .../JKleppmannTreeNodeWrapper.java | 44 +- .../JKleppmannTreeOpWrapper.java | 25 +- .../JKleppmannTreePeerInterface.java | 10 +- .../JKleppmannTreePeriodicPushOp.java | 11 +- .../structs/JKleppmannTreeNode.java | 42 +- .../structs/JKleppmannTreeNodeMetaFile.java | 5 +- .../structs/JKleppmannTreePersistentData.java | 93 +- 158 files changed, 5884 insertions(+), 1218 deletions(-) create mode 100644 dhfs-parent/server-old/.dockerignore create mode 100644 dhfs-parent/server-old/.gitignore create mode 100644 dhfs-parent/server-old/Dockerfile create mode 100644 dhfs-parent/server-old/docker-compose.yml create mode 100644 dhfs-parent/server-old/pom.xml create mode 100644 dhfs-parent/server-old/src/lombok.config create mode 100644 dhfs-parent/server-old/src/main/docker/Dockerfile.jvm create mode 100644 dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar create mode 100644 dhfs-parent/server-old/src/main/docker/Dockerfile.native create mode 100644 dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro create mode 100644 dhfs-parent/server-old/src/main/java/DeadlockDetector.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java create mode 100644 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java (100%) rename 
dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java (100%) rename dhfs-parent/{server => 
server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java (100%) rename dhfs-parent/{server => 
server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java (100%) rename dhfs-parent/{server => server-old}/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java (100%) create mode 100644 dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java create mode 100644 dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto create mode 100644 dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto create mode 100644 dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto create mode 100644 dhfs-parent/server-old/src/main/resources/application.properties create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java create mode 100644 dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java create mode 100644 dhfs-parent/server-old/src/test/resources/application.properties diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java index 5cd564b7..0fe9a95f 100644 --- 
a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffect.java
@@ -1,9 +1,11 @@
package com.usatiuk.kleppmanntree;
+import java.io.Serializable;
+
public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
NodeIdT newParentId,
MetaT newMeta,
- NodeIdT childId) {
+ NodeIdT childId) implements Serializable {
}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
index ec3f2662..c1c0a477 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogEffectOld.java
@@ -1,6 +1,9 @@
package com.usatiuk.kleppmanntree;
+import java.io.Serializable;
+
public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
NodeIdT oldParent,
- MetaT oldMeta) {}
+ MetaT oldMeta) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
index b9a7b9da..2fb036c4 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/LogRecord.java
@@ -1,7 +1,9 @@
package com.usatiuk.kleppmanntree;
+import java.io.Serializable;
import java.util.List;
public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
- List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {}
+ List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
index e9c19562..85b7f383 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/OpMove.java
@@ -1,5 +1,8 @@
package com.usatiuk.kleppmanntree;
+import java.io.Serializable;
+
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(CombinedTimestamp<TimestampT, PeerIdT> timestamp,
NodeIdT newParentId,
MetaT newMeta,
- NodeIdT childId) {}
+ NodeIdT childId) implements Serializable {
+}
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
index a2b1577f..4eaad710 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
@@ -3,12 +3,13 @@ package com.usatiuk.kleppmanntree;
import lombok.Getter;
import lombok.Setter;
+import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
@Getter
@Setter
-public class TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
+public class TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> implements Serializable {
private final NodeIdT _id;
private NodeIdT _parent = null;
private OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> _lastEffectiveOp = null;
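The five hunks above all do one thing: they mark the kleppmanntree log types (LogEffect, LogEffectOld, LogRecord, OpMove, TreeNode) as Serializable, so move-log entries and tree snapshots can be persisted with plain Java serialization. A minimal sketch of the round-trip this enables; Move below is a hypothetical stand-in, not the project's OpMove, and the key constraint is that a generic record serializes only when the record itself and its actual type arguments are all Serializable:

    import java.io.*;

    // Hypothetical stand-in for a move-operation record.
    record Move<NodeIdT extends Serializable>(NodeIdT newParentId, NodeIdT childId)
            implements Serializable {}

    class RoundTrip {
        public static void main(String[] args) throws Exception {
            var op = new Move<>("parent-1", "child-2");
            var buf = new ByteArrayOutputStream();
            try (var out = new ObjectOutputStream(buf)) {
                out.writeObject(op); // throws NotSerializableException if any component isn't Serializable
            }
            try (var in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
                System.out.println(in.readObject().equals(op)); // true: records compare component-wise
            }
        }
    }

diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java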
b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java
index 5a339234..36ebea42 100644
--- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java
+++ b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java
@@ -154,7 +154,7 @@ class ObjectsAllocProcessor {
try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString(), long.class.getName())) {
constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis());
- constructor.writeInstanceField(modified.getFieldDescriptor(), constructor.getThis(), constructor.load(false));
+ constructor.writeInstanceField(modified.getFieldDescriptor(), constructor.getThis(), constructor.load(true)); // FIXME:
for (var field : fieldsMap.values()) {
if (!Objects.equals(field.getName(), VERSION_NAME))
constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod(
@@ -239,6 +239,7 @@ class ObjectsAllocProcessor {
Map<String, MethodInfo> collectMethods(List<ClassInfo> types) {
return types.stream()
.flatMap(x -> x.methods().stream())
+ .filter(x -> x.name().startsWith("get") || x.name().startsWith("set"))
.collect(Collectors.toMap(MethodInfo::name, x -> x));
}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
index 8e9c061a..f50260ab 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
@@ -44,6 +44,13 @@ public interface TransactionManager {
}
}
+ default void executeTx(VoidFn fn) {
+ run(fn);
+ }
+
+ default <T> T executeTx(Supplier<T> supplier) {
+ return run(supplier);
+ }
Transaction current();
}
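The TransactionManager hunk above adds executeTx overloads that simply delegate to run, giving call sites a name that reads as "execute inside a transaction" (DhfsFileServiceImpl further down uses jObjectTxManager.executeTx throughout). A sketch of both shapes against an abbreviated stand-in interface; the generic signature default <T> T executeTx(Supplier<T> supplier) is an assumption, since angle brackets did not survive this dump:

    import java.util.function.Supplier;

    interface VoidFn { void apply(); } // stand-in for com.usatiuk.dhfs.utils.VoidFn

    // Abbreviated stand-in for the real TransactionManager.
    interface Tx {
        void run(VoidFn fn);
        <T> T run(Supplier<T> supplier);

        default void executeTx(VoidFn fn) { run(fn); }
        default <T> T executeTx(Supplier<T> supplier) { return run(supplier); }
    }

    class TxDemo {
        static void use(Tx tx) {
            // Void form: side effects only.
            tx.executeTx(() -> System.out.println("inside tx"));
            // Value form: the supplier's result escapes the transaction.
            long size = tx.executeTx(() -> 42L);
            System.out.println(size);
        }
    }

diff --git a/dhfs-parent/server-old/.dockerignore b/dhfs-parent/server-old/.dockerignore
new file mode 100644
index 00000000..94810d00
--- /dev/null
+++ b/dhfs-parent/server-old/.dockerignore
@@ -0,0 +1,5 @@
+*
+!target/*-runner
+!target/*-runner.jar
+!target/lib/*
+!target/quarkus-app/*
\ No newline at end of file
diff --git a/dhfs-parent/server-old/.gitignore b/dhfs-parent/server-old/.gitignore
new file mode 100644
index 00000000..8c7863e7
--- /dev/null
+++ b/dhfs-parent/server-old/.gitignore
@@ -0,0 +1,43 @@
+#Maven
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+release.properties
+.flattened-pom.xml
+
+# Eclipse
+.project
+.classpath
+.settings/
+bin/
+
+# IntelliJ
+.idea
+*.ipr
+*.iml
+*.iws
+
+# NetBeans
+nb-configuration.xml
+
+# Visual Studio Code
+.vscode
+.factorypath
+
+# OSX
+.DS_Store
+
+# Vim
+*.swp
+*.swo
+
+# patch
+*.orig
+*.rej
+
+# Local environment
+.env
+
+# Plugin directory
+/.quarkus/cli/plugins/
diff --git a/dhfs-parent/server-old/Dockerfile b/dhfs-parent/server-old/Dockerfile
new file mode 100644
index 00000000..62bace54
--- /dev/null
+++ b/dhfs-parent/server-old/Dockerfile
@@ -0,0 +1,2 @@
+FROM azul/zulu-openjdk-debian:21-jre-latest
+RUN apt update && apt install -y libfuse2 curl
\ No newline at end of file
diff --git a/dhfs-parent/server-old/docker-compose.yml b/dhfs-parent/server-old/docker-compose.yml
new file mode 100644
index 00000000..a6a0aefa
--- /dev/null
+++ b/dhfs-parent/server-old/docker-compose.yml
@@ -0,0 +1,42 @@
+version: "3.2"
+
+services:
+ dhfs1:
+ build: .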
+ privileged: true + devices: + - /dev/fuse + volumes: + - $HOME/dhfs/dhfs1:/dhfs_root + - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared + - ./target/quarkus-app:/app + command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED + -Ddhfs.objects.persistence.files.root=/dhfs_root/p + -Ddhfs.objects.root=/dhfs_root/d + -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 + -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005 + -jar /app/quarkus-run.jar" + ports: + - 8080:8080 + - 8081:8443 + - 5005:5005 + dhfs2: + build: . + privileged: true + devices: + - /dev/fuse + volumes: + - $HOME/dhfs/dhfs2:/dhfs_root + - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared + - ./target/quarkus-app:/app + command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED + --add-exports java.base/jdk.internal.access=ALL-UNNAMED + -Ddhfs.objects.persistence.files.root=/dhfs_root/p + -Ddhfs.objects.root=/dhfs_root/d + -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 + -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010 + -jar /app/quarkus-run.jar" + ports: + - 8090:8080 + - 8091:8443 + - 5010:5010 diff --git a/dhfs-parent/server-old/pom.xml b/dhfs-parent/server-old/pom.xml new file mode 100644 index 00000000..bb74c72a --- /dev/null +++ b/dhfs-parent/server-old/pom.xml @@ -0,0 +1,209 @@ + + + 4.0.0 + com.usatiuk.dhfs + server + 1.0.0-SNAPSHOT + + + com.usatiuk.dhfs + parent + 1.0-SNAPSHOT + + + + + org.testcontainers + testcontainers + test + + + org.awaitility + awaitility + test + + + com.usatiuk + autoprotomap + 1.0-SNAPSHOT + + + com.usatiuk + autoprotomap-deployment + 1.0-SNAPSHOT + provided + + + org.bouncycastle + bcprov-jdk18on + 1.78.1 + + + org.bouncycastle + bcpkix-jdk18on + 1.78.1 + + + io.quarkus + quarkus-security + + + net.openhft + zero-allocation-hashing + + + io.quarkus + quarkus-grpc + + + io.quarkus + quarkus-arc + + + io.quarkus + quarkus-rest + + + io.quarkus + quarkus-rest-client + + + io.quarkus + quarkus-rest-client-jsonb + + + io.quarkus + quarkus-rest-jsonb + + + io.quarkus + quarkus-scheduler + + + io.quarkus + quarkus-junit5 + test + + + org.projectlombok + lombok + provided + + + com.github.SerCeMan + jnr-fuse + 44ed40f8ce + + + com.github.jnr + jnr-ffi + 2.2.16 + + + com.github.jnr + jnr-posix + 3.1.19 + + + com.github.jnr + jnr-constants + 0.10.4 + + + org.apache.commons + commons-lang3 + + + commons-io + commons-io + + + org.jboss.slf4j + slf4j-jboss-logmanager + test + + + commons-codec + commons-codec + + + org.apache.commons + commons-collections4 + + + org.apache.commons + commons-math3 + 3.6.1 + + + com.usatiuk + kleppmanntree + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + supportlib + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + objects + 1.0-SNAPSHOT + + + com.usatiuk.dhfs + utils + 1.0-SNAPSHOT + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1C + false + classes + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + true + + + concurrent + + + 0.5 + + true + true + + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + true + + + quarkus-plugin + + build + generate-code + generate-code-tests + + + + + + + diff --git a/dhfs-parent/server-old/src/lombok.config b/dhfs-parent/server-old/src/lombok.config new file mode 100644 index 00000000..f1c474ce --- /dev/null +++ b/dhfs-parent/server-old/src/lombok.config @@ -0,0 +1 @@ +lombok.accessors.prefix += _ diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm new 
file mode 100644
index 00000000..b1de5988
--- /dev/null
+++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm
@@ -0,0 +1,97 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./mvnw package
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/server-jvm
+#
+# If you want to include the debug port into your docker image
+# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
+# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
+# when running the container
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/server-jvm
+#
+# This image uses the `run-java.sh` script to run the application.
+# This script computes the command line to execute your Java application, and
+# includes memory/GC tuning.
+# You can configure the behavior using the following environment properties:
+# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
+# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
+# in JAVA_OPTS (example: "-Dsome.property=foo")
+# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
+# used to calculate a default maximal heap memory based on a container's restriction.
+# If used in a container without any memory constraints for the container then this
+# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
+# of the container available memory as set here. The default is `50` which means 50%
+# of the available memory is used as an upper boundary. You can skip this mechanism by
+# setting this value to `0` in which case no `-Xmx` option is added.
+# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
+# is used to calculate a default initial heap memory based on the maximum heap memory.
+# If used in a container without any memory constraints for the container then this
+# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
+# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
+# is used as the initial heap size. You can skip this mechanism by setting this value
+# to `0` in which case no `-Xms` option is added (example: "25")
+# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
+# This is used to calculate the maximum value of the initial heap memory. If used in
+# a container without any memory constraints for the container then this option has
+# no effect. If there is a memory constraint then `-Xms` is limited to the value set
+# here. The default is 4096MB which means the calculated value of `-Xms` will never
+# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
+# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
+# when things are happening. This option, if set to true, will set
+# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
+# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
+# "true").
+# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
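+# For instance, an illustrative debug-enabled invocation combining the two
+# variables above (not part of the generated template):
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_DEBUG=true -e JAVA_DEBUG_PORT=*:5005 quarkus/server-jvm
+#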
+# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
+# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
+# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
+# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
+# (example: "20")
+# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
+# (example: "40")
+# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
+# (example: "4")
+# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
+# previous GC times. (example: "90")
+# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
+# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
+# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
+# contain the necessary JRE command-line options to specify the required GC, which
+# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
+# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
+# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
+# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
+# accessed directly. (example: "foo.example.com,bar.example.com")
+#
+###
+FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
+
+ENV LANGUAGE='en_US:en'
+
+
+# We make four distinct layers so if there are application changes the library layers can be re-used
+COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
+COPY --chown=185 target/quarkus-app/*.jar /deployments/
+COPY --chown=185 target/quarkus-app/app/ /deployments/app/
+COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
+
+EXPOSE 8080
+USER 185
+ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
+
+ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
+
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar
new file mode 100644
index 00000000..f66a1665
--- /dev/null
+++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar
@@ -0,0 +1,93 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
+#
+# If you want to include the debug port into your docker image
+# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
+# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
+# when running the container
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
+#
+# This image uses the `run-java.sh` script to run the application.
+# This script computes the command line to execute your Java application, and
+# includes memory/GC tuning.
+# You can configure the behavior using the following environment properties:
+# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
+# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
+# in JAVA_OPTS (example: "-Dsome.property=foo")
+# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
+# used to calculate a default maximal heap memory based on a container's restriction.
+# If used in a container without any memory constraints for the container then this
+# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
+# of the container available memory as set here. The default is `50` which means 50%
+# of the available memory is used as an upper boundary. You can skip this mechanism by
+# setting this value to `0` in which case no `-Xmx` option is added.
+# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
+# is used to calculate a default initial heap memory based on the maximum heap memory.
+# If used in a container without any memory constraints for the container then this
+# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
+# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
+# is used as the initial heap size. You can skip this mechanism by setting this value
+# to `0` in which case no `-Xms` option is added (example: "25")
+# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
+# This is used to calculate the maximum value of the initial heap memory. If used in
+# a container without any memory constraints for the container then this option has
+# no effect. If there is a memory constraint then `-Xms` is limited to the value set
+# here. The default is 4096MB which means the calculated value of `-Xms` will never
+# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
+# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
+# when things are happening. This option, if set to true, will set
+# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
+# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
+# "true").
+# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
+# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
+# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
+# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
+# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
+# (example: "20")
+# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
+# (example: "40")
+# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
+# (example: "4")
+# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
+# previous GC times. (example: "90")
+# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
+# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
+# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
+# contain the necessary JRE command-line options to specify the required GC, which
+# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
+# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
+# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
+# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
+# accessed directly. (example: "foo.example.com,bar.example.com")
+#
+###
+FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
+
+ENV LANGUAGE='en_US:en'
+
+
+COPY target/lib/* /deployments/lib/
+COPY target/*-runner.jar /deployments/quarkus-run.jar
+
+EXPOSE 8080
+USER 185
+ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
+
+ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native b/dhfs-parent/server-old/src/main/docker/Dockerfile.native
new file mode 100644
index 00000000..226e7c71
--- /dev/null
+++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.native
@@ -0,0 +1,27 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
+#
+# Before building the container image run:
+#
+# ./mvnw package -Dnative
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/server
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
+WORKDIR /work/
+RUN chown 1001 /work \
+ && chmod "g+rwX" /work \
+ && chown 1001:root /work
+COPY --chown=1001:root target/*-runner /work/application
+
+EXPOSE 8080
+USER 1001
+
+ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro
new file mode 100644
index 00000000..4bd4c6de
--- /dev/null
+++ b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro
@@ -0,0 +1,30 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
+# It uses a micro base image, tuned for Quarkus native executables.
+# It reduces the size of the resulting container image.
+# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
+#
+# Before building the container image run:
+#
+# ./mvnw package -Dnative
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/server +# +### +FROM quay.io/quarkus/quarkus-micro-image:2.0 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java new file mode 100644 index 00000000..7b275098 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java @@ -0,0 +1,63 @@ +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.util.Arrays; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +@ApplicationScoped +public class DeadlockDetector { + private final ExecutorService _executor = Executors.newSingleThreadExecutor(); + + void init(@Observes @Priority(1) StartupEvent event) { + _executor.submit(this::run); + } + + void shutdown(@Observes @Priority(100000) ShutdownEvent event) { + _executor.shutdownNow(); + } + + private void run() { + ThreadMXBean bean = ManagementFactory.getThreadMXBean(); + try { + while (!Thread.interrupted()) { + Thread.sleep(4000); + + long[] threadIds = bean.findDeadlockedThreads(); // Returns null if no threads are deadlocked. + + if (threadIds != null) { + ThreadInfo[] infos = bean.getThreadInfo(threadIds, Integer.MAX_VALUE); + + StringBuilder sb = new StringBuilder(); + + sb.append("Deadlock detected!\n"); + + for (ThreadInfo info : infos) { + StackTraceElement[] stack = info.getStackTrace(); + sb.append(info.getThreadName()).append("\n"); + sb.append("getLockedMonitors: ").append(Arrays.toString(info.getLockedMonitors())).append("\n"); + sb.append("getLockedSynchronizers: ").append(Arrays.toString(info.getLockedSynchronizers())).append("\n"); + sb.append("waiting on: ").append(info.getLockInfo()).append("\n"); + sb.append("locked by: ").append(info.getLockOwnerName()).append("\n"); + sb.append("Stack trace:\n"); + for (var e : stack) { + sb.append(e.toString()).append("\n"); + } + sb.append("==="); + } + + Log.error(sb); + } + } + } catch (InterruptedException e) { + } + Log.info("Deadlock detector thread exiting"); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java new file mode 100644 index 00000000..69e488c0 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs; + +import io.quarkus.runtime.Quarkus; +import io.quarkus.runtime.QuarkusApplication; +import io.quarkus.runtime.annotations.QuarkusMain; + +@QuarkusMain +public class Main { + public static void main(String... args) { + Quarkus.run(DhfsStorageServerApp.class, args); + } + + public static class DhfsStorageServerApp implements QuarkusApplication { + + @Override + public int run(String... 
args) throws Exception { + Quarkus.waitForExit(); + return 0; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java new file mode 100644 index 00000000..dcd379a8 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java @@ -0,0 +1,42 @@ +package com.usatiuk.dhfs; + +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.nio.file.Paths; + +@ApplicationScoped +public class ShutdownChecker { + private static final String dataFileName = "running"; + @ConfigProperty(name = "dhfs.objects.root") + String dataRoot; + boolean _cleanShutdown = true; + boolean _initialized = false; + + void init(@Observes @Priority(2) StartupEvent event) throws IOException { + Paths.get(dataRoot).toFile().mkdirs(); + Log.info("Initializing with root " + dataRoot); + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { + _cleanShutdown = false; + Log.error("Unclean shutdown detected!"); + } else { + Paths.get(dataRoot).resolve(dataFileName).toFile().createNewFile(); + } + _initialized = true; + } + + void shutdown(@Observes @Priority(100000) ShutdownEvent event) throws IOException { + Paths.get(dataRoot).resolve(dataFileName).toFile().delete(); + } + + public boolean lastShutdownClean() { + if (!_initialized) throw new IllegalStateException("ShutdownChecker not initialized"); + return _cleanShutdown; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java new file mode 100644 index 00000000..46f8e283 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -0,0 +1,90 @@ +package com.usatiuk.dhfs.files.objects; + +import 
com.google.protobuf.ByteString; +import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; +import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.Leaf; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import net.openhft.hashing.LongTupleHashFunction; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@AssumedUnique +@Leaf +public class ChunkData extends JObjectData { + final ChunkDataP _data; + + public ChunkData(ByteString bytes) { + super(); + _data = ChunkDataP.newBuilder() + .setData(bytes) + // TODO: There might be (most definitely) a copy there + .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer())) + .mapToObj(Long::toHexString).collect(Collectors.joining())) + .build(); + } + + public ChunkData(ByteString bytes, String name) { + super(); + _data = ChunkDataP.newBuilder() + .setData(bytes) + .setName(name) + .build(); + } + + public ChunkData(ChunkDataP chunkDataP) { + super(); + _data = chunkDataP; + } + + ChunkDataP getData() { + return _data; + } + + public ByteString getBytes() { + return _data.getData(); + } + + public int getSize() { + return _data.getData().size(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ChunkData chunkData = (ChunkData) o; + return Objects.equals(getName(), chunkData.getName()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getName()); + } + + @Override + public String getName() { + return _data.getName(); + } + + @Override + public Class getConflictResolver() { + return NoOpConflictResolver.class; + } + + @Override + public Collection extractRefs() { + return List.of(); + } + + @Override + public int estimateSize() { + return _data.getData().size(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java new file mode 100644 index 00000000..0c6fa4e8 --- /dev/null +++ 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.files.objects; + +import com.usatiuk.dhfs.files.conflicts.FileConflictResolver; +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import lombok.Getter; +import lombok.Setter; + +import java.util.*; + +public class File extends FsNode { + @Getter + private final NavigableMap _chunks; + @Getter + private final boolean _symlink; + @Getter + @Setter + private long _size = 0; + + public File(UUID uuid, long mode, boolean symlink) { + super(uuid, mode); + _symlink = symlink; + _chunks = new TreeMap<>(); + } + + public File(UUID uuid, long mode, boolean symlink, NavigableMap chunks) { + super(uuid, mode); + _symlink = symlink; + _chunks = chunks; + } + + @Override + public Class getConflictResolver() { + return FileConflictResolver.class; + } + + @Override + public Class getRefType() { + return ChunkData.class; + } + + @Override + public Collection extractRefs() { + return Collections.unmodifiableCollection(_chunks.values()); + } + + @Override + public int estimateSize() { + return _chunks.size() * 192; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java new file mode 100644 index 00000000..a6e6ac14 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java @@ -0,0 +1,43 @@ +package com.usatiuk.dhfs.files.objects; + +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import lombok.Getter; +import lombok.Setter; + +import java.io.Serial; +import java.util.UUID; + +public abstract class FsNode extends JObjectData { + @Serial + private static final long serialVersionUID = 1; + + @Getter + final UUID _uuid; + @Getter + @Setter + private long _mode; + @Getter + @Setter + private long _ctime; + @Getter + @Setter + private long _mtime; + + protected FsNode(UUID uuid) { + this._uuid = uuid; + this._ctime = System.currentTimeMillis(); + this._mtime = this._ctime; + } + + protected FsNode(UUID uuid, long mode) { + this._uuid = uuid; + this._mode = mode; + this._ctime = System.currentTimeMillis(); + this._mtime = this._ctime; + } + + @Override + public String getName() { + return _uuid.toString(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java new file mode 100644 index 00000000..58678dd2 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.files.service; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Optional; + +public interface DhfsFileService { + Optional open(String name); + + Optional 
create(String name, long mode); + + Pair inoToParent(String ino); + + void mkdir(String name, long mode); + + Optional getattr(String name); + + Boolean chmod(String name, long mode); + + void unlink(String name); + + Boolean rename(String from, String to); + + Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs); + + Iterable readDir(String name); + + void updateFileSize(JObject file); + + Long size(String f); + + Optional read(String fileUuid, long offset, int length); + + Long write(String fileUuid, long offset, ByteString data); + + default Long write(String fileUuid, long offset, byte[] data) { + return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data)); + } + + Boolean truncate(String fileUuid, long length); + + String readlink(String uuid); + + ByteString readlinkBS(String uuid); + + String symlink(String oldpath, String newpath); +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java new file mode 100644 index 00000000..33b30d85 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -0,0 +1,814 @@ +package com.usatiuk.dhfs.files.service; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.files.objects.ChunkData; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.files.objects.FsNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.jrepository.JMutator; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.*; +import java.util.stream.StreamSupport; + +@ApplicationScoped +public class DhfsFileServiceImpl implements DhfsFileService { + @Inject + JObjectManager jObjectManager; + @Inject + JObjectTxManager jObjectTxManager; + + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + + @ConfigProperty(name = "dhfs.files.write_merge_threshold") + float writeMergeThreshold; + + @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take") + float writeMergeMaxChunkToTake; + + @ConfigProperty(name = "dhfs.files.write_merge_limit") + float writeMergeLimit; + + @ConfigProperty(name = "dhfs.files.write_last_chunk_limit") + float writeLastChunkLimit; + + @ConfigProperty(name = "dhfs.files.use_hash_for_chunks") + boolean 
useHashForChunks; + + @ConfigProperty(name = "dhfs.files.allow_recursive_delete") + boolean allowRecursiveDelete; + + @ConfigProperty(name = "dhfs.objects.ref_verification") + boolean refVerification; + + @ConfigProperty(name = "dhfs.objects.write_log") + boolean writeLogging; + + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + JKleppmannTreeManager jKleppmannTreeManager; + + private JKleppmannTreeManager.JKleppmannTree _tree; + + private ChunkData createChunk(ByteString bytes) { + if (useHashForChunks) { + return new ChunkData(bytes); + } else { + return new ChunkData(bytes, persistentPeerDataService.getUniqueId()); + } + } + + void init(@Observes @Priority(500) StartupEvent event) { + Log.info("Initializing file service"); + _tree = jKleppmannTreeManager.getTree("fs"); + } + + private JObject getDirEntry(String name) { + var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); + var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) + throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); + return (JObject) ret; + } + + private Optional> getDirEntryOpt(String name) { + var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + if (res == null) return Optional.empty(); + var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) + throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); + return Optional.of((JObject) ret); + } + + @Override + public Optional getattr(String uuid) { + return jObjectTxManager.executeTx(() -> { + var ref = jObjectManager.get(uuid); + if (ref.isEmpty()) return Optional.empty(); + return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + GetattrRes ret; + if (d instanceof File f) { + ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? 
GetattrType.SYMLINK : GetattrType.FILE); + } else if (d instanceof JKleppmannTreeNode) { + ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); + } else { + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + } + return Optional.of(ret); + }); + }); + } + + @Override + public Optional open(String name) { + return jObjectTxManager.executeTx(() -> { + try { + var ret = getDirEntry(name); + return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno(); + else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName(); + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + })); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { + return Optional.empty(); + } + throw e; + } + }); + } + + private void ensureDir(JObject entry) { + entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory")); + else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null; + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); + }); + } + + @Override + public Optional create(String name, long mode) { + return jObjectTxManager.executeTx(() -> { + Path path = Path.of(name); + var parent = getDirEntry(path.getParent().toString()); + + ensureDir(parent); + + String fname = path.getFileName().toString(); + + var fuuid = UUID.randomUUID(); + Log.debug("Creating file " + fuuid); + File f = new File(fuuid, mode, false); + + var newNodeId = _tree.getNewNodeId(); + var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId)); + try { + _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); + } catch (Exception e) { + fobj.getMeta().removeRef(newNodeId); + throw e; + } finally { + fobj.rwUnlock(); + } + return Optional.of(f.getName()); + }); + } + + //FIXME: Slow.. 
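+ // findParent applies this predicate while walking the tree, so each lookup
+ // costs on the order of the node count; a reverse ino -> tree node index is
+ // presumably the fix the FIXME above has in mind.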
+ @Override + public Pair inoToParent(String ino) { + return jObjectTxManager.executeTx(() -> { + return _tree.findParent(w -> { + if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) + if (f.getFileIno().equals(ino)) + return true; + return false; + }); + }); + } + + @Override + public void mkdir(String name, long mode) { + jObjectTxManager.executeTx(() -> { + Path path = Path.of(name); + var parent = getDirEntry(path.getParent().toString()); + ensureDir(parent); + + String dname = path.getFileName().toString(); + + Log.debug("Creating directory " + name); + + _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId()); + }); + } + + @Override + public void unlink(String name) { + jObjectTxManager.executeTx(() -> { + var node = getDirEntryOpt(name).orElse(null); + JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) + if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException(); + return d.getNode().getMeta(); + }); + + _tree.trash(meta, node.getMeta().getName()); + }); + } + + @Override + public Boolean rename(String from, String to) { + return jObjectTxManager.executeTx(() -> { + var node = getDirEntry(from); + JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta()); + + var toPath = Path.of(to); + var toDentry = getDirEntry(toPath.getParent().toString()); + ensureDir(toDentry); + + _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName()); + + return true; + }); + } + + @Override + public Boolean chmod(String uuid, long mode) { + return jObjectTxManager.executeTx(() -> { + var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); + + dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> { + if (d instanceof JKleppmannTreeNode) { + return null;//FIXME:? 
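+ // Tree nodes carry no stored mode bits (getattr() above synthesizes 0700
+ // for directories), so chmod on a directory is accepted as a no-op.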
+ } else if (d instanceof File f) { + bump.apply(); + f.setMtime(System.currentTimeMillis()); + f.setMode(mode); + } else { + throw new IllegalArgumentException(uuid + " is not a file"); + } + return null; + }); + + return true; + }); + } + + @Override + public Iterable readDir(String name) { + return jObjectTxManager.executeTx(() -> { + var found = getDirEntry(name); + + return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) { + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + } + return new ArrayList<>(d.getNode().getChildren().keySet()); + }); + }); + } + + @Override + public Optional read(String fileUuid, long offset, int length) { + return jObjectTxManager.executeTx(() -> { + if (length < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); + if (offset < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); + + var fileOpt = jObjectManager.get(fileUuid); + if (fileOpt.isEmpty()) { + Log.error("File not found when trying to read: " + fileUuid); + return Optional.empty(); + } + var file = fileOpt.get(); + + try { + return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { + if (!(fileData instanceof File)) { + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + } + var chunksAll = ((File) fileData).getChunks(); + if (chunksAll.isEmpty()) { + return Optional.of(ByteString.empty()); + } + var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); + + if (chunksList.isEmpty()) { + return Optional.of(ByteString.empty()); + } + + var chunks = chunksList.iterator(); + ByteString buf = ByteString.empty(); + + long curPos = offset; + var chunk = chunks.next(); + + while (curPos < offset + length) { + var chunkPos = chunk.getKey(); + + long offInChunk = curPos - chunkPos; + + long toReadInChunk = (offset + length) - curPos; + + var chunkBytes = readChunk(chunk.getValue()); + + long readableLen = chunkBytes.size() - offInChunk; + + var toReadReally = Math.min(readableLen, toReadInChunk); + + if (toReadReally < 0) break; + + buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); + + curPos += toReadReally; + + if (readableLen > toReadInChunk) + break; + + if (!chunks.hasNext()) break; + + chunk = chunks.next(); + } + + // FIXME: + return Optional.of(buf); + }); + } catch (Exception e) { + Log.error("Error reading file: " + fileUuid, e); + return Optional.empty(); + } + }); + } + + private ByteString readChunk(String uuid) { + var chunkRead = jObjectManager.get(uuid).orElse(null); + + if (chunkRead == null) { + Log.error("Chunk requested not found: " + uuid); + throw new StatusRuntimeException(Status.NOT_FOUND); + } + + return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { + if (!(d instanceof ChunkData cd)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + return cd.getBytes(); + }); + } + + private int getChunkSize(String uuid) { + return readChunk(uuid).size(); + } + + private void cleanupChunks(File f, Collection uuids) { + // FIXME: + var inFile = useHashForChunks ? 
new HashSet<>(f.getChunks().values()) : Collections.emptySet(); + for (var cuuid : uuids) { + try { + if (inFile.contains(cuuid)) continue; + jObjectManager.get(cuuid) + .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, + (m, d, b, v) -> { + m.removeRef(f.getName()); + return null; + })); + } catch (Exception e) { + Log.error("Error when cleaning chunk " + cuuid, e); + } + } + } + + @Override + public Long write(String fileUuid, long offset, ByteString data) { + return jObjectTxManager.executeTx(() -> { + if (offset < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); + + // FIXME: + var file = (JObject) jObjectManager.get(fileUuid).orElse(null); + if (file == null) { + Log.error("File not found when trying to read: " + fileUuid); + return -1L; + } + + file.rwLockNoCopy(); + try { + file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + // FIXME: + if (!(file.getData() instanceof File)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + if (writeLogging) { + Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " " + + offset + " " + data.size()); + } + + if (size(fileUuid) < offset) + truncate(fileUuid, offset); + + // FIXME: Some kind of immutable interface? + var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); + var first = chunksAll.floorEntry(offset); + var last = chunksAll.lowerEntry(offset + data.size()); + NavigableMap removedChunks = new TreeMap<>(); + + long start = 0; + + NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); + NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); + + if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { + beforeFirst = chunksAll; + afterLast = Collections.emptyNavigableMap(); + first = null; + last = null; + start = offset; + } else if (!chunksAll.isEmpty()) { + var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); + removedChunks.putAll(between); + start = first.getKey(); + } + + ByteString pendingWrites = ByteString.empty(); + + if (first != null && first.getKey() < offset) { + var chunkBytes = readChunk(first.getValue()); + pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); + } + pendingWrites = pendingWrites.concat(data); + + if (last != null) { + var lchunkBytes = readChunk(last.getValue()); + if (last.getKey() + lchunkBytes.size() > offset + data.size()) { + var startInFile = offset + data.size(); + var startInChunk = startInFile - last.getKey(); + pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); + } + } + + int combinedSize = pendingWrites.size(); + + if (targetChunkSize > 0) { + if (combinedSize < (targetChunkSize * writeMergeThreshold)) { + boolean leftDone = false; + boolean rightDone = false; + while (!leftDone && !rightDone) { + if (beforeFirst.isEmpty()) leftDone = true; + if (!beforeFirst.isEmpty() || !leftDone) { + var takeLeft = beforeFirst.lastEntry(); + + var cuuid = takeLeft.getValue(); + + if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { + leftDone = true; + continue; + } + + if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { + leftDone = true; + continue; + } + + // FIXME: (and test this) + beforeFirst 
= beforeFirst.headMap(takeLeft.getKey(), false); + start = takeLeft.getKey(); + pendingWrites = readChunk(cuuid).concat(pendingWrites); + combinedSize += getChunkSize(cuuid); + removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); + } + if (afterLast.isEmpty()) rightDone = true; + if (!afterLast.isEmpty() && !rightDone) { + var takeRight = afterLast.firstEntry(); + + var cuuid = takeRight.getValue(); + + if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { + rightDone = true; + continue; + } + + if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { + rightDone = true; + continue; + } + + // FIXME: (and test this) + afterLast = afterLast.tailMap(takeRight.getKey(), false); + pendingWrites = pendingWrites.concat(readChunk(cuuid)); + combinedSize += getChunkSize(cuuid); + removedChunks.put(takeRight.getKey(), takeRight.getValue()); + } + } + } + } + + NavigableMap newChunks = new TreeMap<>(); + + { + int cur = 0; + while (cur < combinedSize) { + int end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { + end = Math.min(cur + targetChunkSize, combinedSize); + } else { + end = combinedSize; + } + } + + var thisChunk = pendingWrites.substring(cur, end); + + ChunkData newChunkData = createChunk(thisChunk); + //FIXME: + jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); + newChunks.put(start, newChunkData.getName()); + + start += thisChunk.size(); + cur = end; + } + } + + file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); + + cleanupChunks(file.getData(), removedChunks.values()); + updateFileSize((JObject) file); + } finally { + file.rwUnlock(); + } + + return (long) data.size(); + }); + } + + @Override + public Boolean truncate(String fileUuid, long length) { + return jObjectTxManager.executeTx(() -> { + if (length < 0) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); + + var file = (JObject) jObjectManager.get(fileUuid).orElse(null); + if (file == null) { + Log.error("File not found when trying to read: " + fileUuid); + return false; + } + + if (length == 0) { + file.rwLockNoCopy(); + try { + file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + + var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks())); + + file.mutate(new JMutator<>() { + long oldMtime; + + @Override + public boolean mutate(File object) { + oldMtime = object.getMtime(); + object.getChunks().clear(); + return true; + } + + @Override + public void revert(File object) { + object.setMtime(oldMtime); + object.getChunks().putAll(oldChunks); + } + }); + cleanupChunks(file.getData(), oldChunks.values()); + updateFileSize((JObject) file); + } catch (Exception e) { + Log.error("Error writing file chunks: " + fileUuid, e); + return false; + } finally { + file.rwUnlock(); + } + return true; + } + + file.rwLockNoCopy(); + try { + file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + + var curSize = size(fileUuid); + if (curSize == length) return true; + + var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); + NavigableMap removedChunks = new TreeMap<>(); + NavigableMap newChunks = new TreeMap<>(); + + if (curSize < length) { + long combinedSize = (length - curSize); + + long start = curSize; + + // Hack + HashMap zeroCache = new HashMap<>(); + + { + long cur = 0; + 
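+ // Zero-fill the extension one targetChunkSize piece at a time. The 1.5 factor
+ // below avoids a tiny tail chunk: with targetChunkSize = 1 MiB and 2.5 MiB to
+ // fill, the first chunk takes 1 MiB (2.5 MiB remaining > 1.5 MiB), and the
+ // remaining 1.5 MiB is not strictly greater than 1.5 MiB, so it is emitted as
+ // one final 1.5 MiB chunk instead of 1 MiB + 0.5 MiB. zeroCache reuses one
+ // zero-filled buffer per distinct chunk length.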
+                        while (cur < combinedSize) {
+                            long end;
+
+                            if (targetChunkSize <= 0)
+                                end = combinedSize;
+                            else {
+                                // Let the last chunk grow to up to 1.5x the target size instead of leaving a tiny tail
+                                if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
+                                    end = cur + targetChunkSize;
+                                } else {
+                                    end = combinedSize;
+                                }
+                            }
+
+                            if (!zeroCache.containsKey(end - cur))
+                                zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));
+
+                            ChunkData newChunkData = createChunk(zeroCache.get(end - cur));
+                            //FIXME:
+                            jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
+                            newChunks.put(start, newChunkData.getName());
+
+                            start += newChunkData.getSize();
+                            cur = end;
+                        }
+                    }
+                } else {
+                    var tail = chunksAll.lowerEntry(length);
+                    var afterTail = chunksAll.tailMap(tail.getKey(), false);
+
+                    removedChunks.put(tail.getKey(), tail.getValue());
+                    removedChunks.putAll(afterTail);
+
+                    var tailBytes = readChunk(tail.getValue());
+                    var newChunk = tailBytes.substring(0, (int) (length - tail.getKey()));
+
+                    ChunkData newChunkData = createChunk(newChunk);
+                    //FIXME:
+                    jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
+                    newChunks.put(tail.getKey(), newChunkData.getName());
+                }
+
+                file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));
+
+                cleanupChunks(file.getData(), removedChunks.values());
+                updateFileSize((JObject) file);
+                return true;
+            } catch (Exception e) {
+                Log.error("Error truncating file: " + fileUuid, e);
+                return false;
+            } finally {
+                file.rwUnlock();
+            }
+        });
+    }
+
+    @Override
+    public String readlink(String uuid) {
+        return jObjectTxManager.executeTx(() -> {
+            return readlinkBS(uuid).toStringUtf8();
+        });
+    }
+
+    @Override
+    public ByteString readlinkBS(String uuid) {
+        return jObjectTxManager.executeTx(() -> {
+            var file = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
+
+            return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
+                if (!(fileData instanceof File)) {
+                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+                }
+
+                if (!((File) fileData).isSymlink())
+                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid));
+
+                return read(uuid, 0, Math.toIntExact(size(uuid))).get();
+            });
+        });
+    }
+
+    @Override
+    public String symlink(String oldpath, String newpath) {
+        return jObjectTxManager.executeTx(() -> {
+            Path path = Path.of(newpath);
+            var parent = getDirEntry(path.getParent().toString());
+
+            ensureDir(parent);
+
+            String fname = path.getFileName().toString();
+
+            var fuuid = UUID.randomUUID();
+            Log.debug("Creating file " + fuuid);
+
+            File f = new File(fuuid, 0, true);
+            var newNodeId = _tree.getNewNodeId();
+            ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
+
+            f.getChunks().put(0L, newChunkData.getName());
+
+            jObjectManager.put(newChunkData, Optional.of(f.getName()));
+            var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId));
+            try {
+                updateFileSize(newFile);
+            } finally {
+                newFile.rwUnlock();
+            }
+
+            _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
+            return f.getName();
+        });
+    }
+
+    @Override
+    public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) {
+        return jObjectTxManager.executeTx(() -> {
+            var file = jObjectManager.get(fileUuid).orElseThrow(
+                    () -> new
StatusRuntimeException(Status.NOT_FOUND.withDescription( + "File not found for setTimes: " + fileUuid)) + ); + + file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> { + if (fileData instanceof JKleppmannTreeNode) return null; // FIXME: + if (!(fileData instanceof FsNode fd)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + bump.apply(); + fd.setMtime(mtimeMs); + return null; + }); + + return true; + }); + } + + @Override + public void updateFileSize(JObject file) { + jObjectTxManager.executeTx(() -> { + file.rwLockNoCopy(); + try { + file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + if (!(file.getData() instanceof File fd)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + long realSize = 0; + + var last = fd.getChunks().lastEntry(); + if (last != null) { + var lastSize = getChunkSize(last.getValue()); + realSize = last.getKey() + lastSize; + } + + if (realSize != fd.getSize()) { + long finalRealSize = realSize; + file.mutate(new JMutator() { + long oldSize; + + @Override + public boolean mutate(File object) { + oldSize = object.getSize(); + object.setSize(finalRealSize); + return true; + } + + @Override + public void revert(File object) { + object.setSize(oldSize); + } + }); + } + } catch (Exception e) { + Log.error("Error updating file size: " + file.getMeta().getName(), e); + } finally { + file.rwUnlock(); + } + }); + } + + @Override + public Long size(String uuid) { + return jObjectTxManager.executeTx(() -> { + var read = jObjectManager.get(uuid) + .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); + + try { + return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> { + if (!(fileData instanceof File fd)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + return fd.getSize(); + }); + } catch (Exception e) { + Log.error("Error reading file: " + uuid, e); + return -1L; + } + }); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java new file mode 100644 index 00000000..f13096f9 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.files.service; + +public class DirectoryNotEmptyException extends RuntimeException { + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java new file mode 100644 index 00000000..3240a6b4 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.files.service; + +public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) { +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java new file mode 100644 index 00000000..ebcd4868 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.files.service; + +public enum GetattrType { + FILE, + DIRECTORY, + SYMLINK +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java new file mode 100644 index 00000000..0fa8ee29 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java @@ -0,0 +1,391 @@ +package com.usatiuk.dhfs.fuse; + +import com.google.protobuf.UnsafeByteOperations; +import com.sun.security.auth.module.UnixSystem; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException; +import com.usatiuk.dhfs.files.service.GetattrRes; +import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; +import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; +import com.usatiuk.kleppmanntree.AlreadyExistsException; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jnr.ffi.Pointer; +import org.apache.commons.lang3.SystemUtils; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import ru.serce.jnrfuse.ErrorCodes; +import ru.serce.jnrfuse.FuseFillDir; +import ru.serce.jnrfuse.FuseStubFS; +import ru.serce.jnrfuse.struct.FileStat; +import ru.serce.jnrfuse.struct.FuseFileInfo; +import ru.serce.jnrfuse.struct.Statvfs; +import ru.serce.jnrfuse.struct.Timespec; + +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Optional; + +import static jnr.posix.FileStat.*; + +@ApplicationScoped +public class DhfsFuse extends FuseStubFS { + private static final int blksize = 1048576; + private static final int iosize = 1048576; + @Inject + ObjectPersistentStore persistentStore; // FIXME? 
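+    // Used by statfs() below to report the backing store's capacity.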
+ @ConfigProperty(name = "dhfs.fuse.root") + String root; + @ConfigProperty(name = "dhfs.fuse.enabled") + boolean enabled; + @ConfigProperty(name = "dhfs.fuse.debug") + Boolean debug; + @ConfigProperty(name = "dhfs.files.target_chunk_size") + int targetChunkSize; + @Inject + JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors; + @Inject + DhfsFileService fileService; + + void init(@Observes @Priority(100000) StartupEvent event) { + if (!enabled) return; + Paths.get(root).toFile().mkdirs(); + Log.info("Mounting with root " + root); + + var uid = new UnixSystem().getUid(); + var gid = new UnixSystem().getGid(); + + var opts = new ArrayList(); + + // Assuming macFuse + if (SystemUtils.IS_OS_MAC) { + opts.add("-o"); + opts.add("iosize=" + iosize); + } else if (SystemUtils.IS_OS_LINUX) { + // FIXME: There's something else missing: the writes still seem to be 32k max +// opts.add("-o"); +// opts.add("large_read"); + opts.add("-o"); + opts.add("big_writes"); + opts.add("-o"); + opts.add("max_read=" + iosize); + opts.add("-o"); + opts.add("max_write=" + iosize); + } + opts.add("-o"); + opts.add("auto_cache"); + opts.add("-o"); + opts.add("uid=" + uid); + opts.add("-o"); + opts.add("gid=" + gid); + + mount(Paths.get(root), false, debug, opts.toArray(String[]::new)); + } + + void shutdown(@Observes @Priority(1) ShutdownEvent event) { + if (!enabled) return; + Log.info("Unmounting"); + umount(); + Log.info("Unmounted"); + } + + @Override + public int statfs(String path, Statvfs stbuf) { + try { + stbuf.f_frsize.set(blksize); + stbuf.f_bsize.set(blksize); + stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system + stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs + stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs + stbuf.f_files.set(1000); //FIXME: + stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: + stbuf.f_namemax.set(2048); + return super.statfs(path, stbuf); + } catch (Exception e) { + Log.error("When statfs " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int getattr(String path, FileStat stat) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var uuid = fileOpt.get(); + Optional found = fileService.getattr(uuid); + if (found.isEmpty()) { + return -ErrorCodes.ENOENT(); + } + switch (found.get().type()) { + case FILE -> { + stat.st_mode.set(S_IFREG | found.get().mode()); + stat.st_nlink.set(1); + stat.st_size.set(fileService.size(uuid)); + } + case DIRECTORY -> { + stat.st_mode.set(S_IFDIR | found.get().mode()); + stat.st_nlink.set(2); + } + case SYMLINK -> { + stat.st_mode.set(S_IFLNK | 0777); + stat.st_nlink.set(1); + stat.st_size.set(fileService.size(uuid)); + } + } + + // FIXME: Race? 
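+            // Times are stored in milliseconds; split them into whole seconds and a nanosecond remainder.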
+            stat.st_ctim.tv_sec.set(found.get().ctime() / 1000);
+            stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1_000_000);
+            stat.st_mtim.tv_sec.set(found.get().mtime() / 1000);
+            stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1_000_000);
+            // atime is not tracked separately, so report mtime for it as well
+            stat.st_atim.tv_sec.set(found.get().mtime() / 1000);
+            stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1_000_000);
+            stat.st_blksize.set(blksize);
+        } catch (Throwable e) {
+            Log.error("When getattr " + path, e);
+            return -ErrorCodes.EIO();
+        }
+        return 0;
+    }
+
+    @Override
+    public int utimens(String path, Timespec[] timespec) {
+        try {
+            var fileOpt = fileService.open(path);
+            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
+            var file = fileOpt.get();
+            // Convert seconds + nanoseconds to milliseconds, keeping the sub-second part
+            var res = fileService.setTimes(file,
+                    timespec[0].tv_sec.get() * 1000 + timespec[0].tv_nsec.get() / 1_000_000,
+                    timespec[1].tv_sec.get() * 1000 + timespec[1].tv_nsec.get() / 1_000_000);
+            if (!res) return -ErrorCodes.EINVAL();
+            else return 0;
+        } catch (Exception e) {
+            Log.error("When utimens " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int open(String path, FuseFileInfo fi) {
+        try {
+            if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT();
+            return 0;
+        } catch (Exception e) {
+            Log.error("When open " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
+        if (size < 0) return -ErrorCodes.EINVAL();
+        if (offset < 0) return -ErrorCodes.EINVAL();
+        try {
+            var fileOpt = fileService.open(path);
+            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
+            var file = fileOpt.get();
+            var read = fileService.read(file, offset, (int) size);
+            if (read.isEmpty()) return 0;
+            UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
+            return read.get().size();
+        } catch (Exception e) {
+            Log.error("When reading " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
+        if (offset < 0) return -ErrorCodes.EINVAL();
+        try {
+            var fileOpt = fileService.open(path);
+            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
+            var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);
+
+            // Copy the incoming data out of the FUSE-owned buffer before handing it to the file service
+            jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
+                    buf.address(),
+                    jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
+                    size
+            );
+
+            var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
+            return written.intValue();
+        } catch (Exception e) {
+            Log.error("When writing " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int create(String path, long mode, FuseFileInfo fi) {
+        try {
+            var ret = fileService.create(path, mode);
+            if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
+            else return 0;
+        } catch (Exception e) {
+            Log.error("When creating " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int mkdir(String path, long mode) {
+        try {
+            fileService.mkdir(path, mode);
+            return 0;
+        } catch (AlreadyExistsException aex) {
+            return -ErrorCodes.EEXIST();
+        } catch (Exception e) {
+            Log.error("When creating dir " + path, e);
+            return -ErrorCodes.EIO();
+        }
+    }
+
+    @Override
+    public int rmdir(String path) {
+        try {
+            fileService.unlink(path);
+            return 0;
+        } catch (DirectoryNotEmptyException ex) {
+            return -ErrorCodes.ENOTEMPTY();
+        } catch (Exception e) {
+            Log.error("When removing dir " + path, e);
+            return
-ErrorCodes.EIO(); + } + } + + @Override + public int rename(String path, String newName) { + try { + var ret = fileService.rename(path, newName); + if (!ret) return -ErrorCodes.ENOENT(); + else return 0; + } catch (Exception e) { + Log.error("When renaming " + path, e); + return -ErrorCodes.EIO(); + } + + } + + @Override + public int unlink(String path) { + try { + fileService.unlink(path); + return 0; + } catch (Exception e) { + Log.error("When unlinking " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int truncate(String path, long size) { + if (size < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var ok = fileService.truncate(file, size); + if (ok) + return 0; + else + return -ErrorCodes.ENOSPC(); + } catch (Exception e) { + Log.error("When truncating " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int chmod(String path, long mode) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var ret = fileService.chmod(fileOpt.get(), mode); + if (ret) return 0; + else return -ErrorCodes.EINVAL(); + } catch (Exception e) { + Log.error("When chmod " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) { + try { + Iterable found; + try { + found = fileService.readDir(path); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode())) + return -ErrorCodes.ENOENT(); + else throw e; + } + + filler.apply(buf, ".", null, 0); + filler.apply(buf, "..", null, 0); + + for (var c : found) { + filler.apply(buf, c, null, 0); + } + + return 0; + } catch (Exception e) { + Log.error("When readdir " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int readlink(String path, Pointer buf, long size) { + if (size < 0) return -ErrorCodes.EINVAL(); + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + var file = fileOpt.get(); + var read = fileService.readlinkBS(fileOpt.get()); + if (read.isEmpty()) return 0; + UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); + buf.putByte(Math.min(size - 1, read.size()), (byte) 0); + return 0; + } catch (Exception e) { + Log.error("When reading " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int chown(String path, long uid, long gid) { + try { + var fileOpt = fileService.open(path); + if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); + return 0; + } catch (Exception e) { + Log.error("When chown " + path, e); + return -ErrorCodes.EIO(); + } + } + + @Override + public int symlink(String oldpath, String newpath) { + try { + var ret = fileService.symlink(oldpath, newpath); + if (ret == null) return -ErrorCodes.EEXIST(); + else return 0; + } catch (Exception e) { + Log.error("When creating " + newpath, e); + return -ErrorCodes.EIO(); + } + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java new file mode 100644 index 00000000..d2790516 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java @@ -0,0 +1,64 @@ +package com.usatiuk.dhfs.fuse; + +import com.google.protobuf.ByteOutput; +import jnr.ffi.Pointer; 
+ +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; + +public class JnrPtrByteOutput extends ByteOutput { + private final Pointer _backing; + private final long _size; + private final JnrPtrByteOutputAccessors _accessors; + private long _pos; + + public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) { + _backing = backing; + _size = size; + _pos = 0; + _accessors = accessors; + } + + @Override + public void write(byte value) { + throw new UnsupportedOperationException(); + } + + @Override + public void write(byte[] value, int offset, int length) { + if (length + _pos > _size) throw new IndexOutOfBoundsException(); + _backing.put(_pos, value, offset, length); + _pos += length; + } + + @Override + public void writeLazy(byte[] value, int offset, int length) { + if (length + _pos > _size) throw new IndexOutOfBoundsException(); + _backing.put(_pos, value, offset, length); + _pos += length; + } + + @Override + public void write(ByteBuffer value) { + var rem = value.remaining(); + if (rem + _pos > _size) throw new IndexOutOfBoundsException(); + + if (value.isDirect()) { + if (value instanceof MappedByteBuffer mb) { + mb.load(); + } + long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position(); + var out = _backing.address() + _pos; + _accessors.getUnsafe().copyMemory(addr, out, rem); + } else { + throw new UnsupportedOperationException(); + } + + _pos += rem; + } + + @Override + public void writeLazy(ByteBuffer value) { + write(value); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java new file mode 100644 index 00000000..78cc8ff4 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java @@ -0,0 +1,24 @@ +package com.usatiuk.dhfs.fuse; + +import jakarta.inject.Singleton; +import jdk.internal.access.JavaNioAccess; +import jdk.internal.access.SharedSecrets; +import lombok.Getter; +import sun.misc.Unsafe; + +import java.lang.reflect.Field; + +@Singleton +class JnrPtrByteOutputAccessors { + @Getter + JavaNioAccess _nioAccess; + @Getter + Unsafe _unsafe; + + JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { + _nioAccess = SharedSecrets.getJavaNioAccess(); + Field f = Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + _unsafe = (Unsafe) f.get(null); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java new file mode 100644 index 00000000..2743bf48 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -0,0 +1,566 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*; +import com.usatiuk.dhfs.objects.jrepository.*; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; +import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; +import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; +import com.usatiuk.kleppmanntree.*; +import com.usatiuk.dhfs.utils.VoidFn; +import 
io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; + +@ApplicationScoped +public class JKleppmannTreeManager { + private static final String dataFileName = "trees"; + private final ConcurrentHashMap _trees = new ConcurrentHashMap<>(); + @Inject + JKleppmannTreePeerInterface jKleppmannTreePeerInterface; + @Inject + OpSender opSender; + @Inject + OpObjectRegistry opObjectRegistry; + @Inject + JObjectManager jObjectManager; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + JObjectTxManager jObjectTxManager; + @Inject + SoftJObjectFactory softJObjectFactory; + @Inject + JKleppmannTreePeerInterface peerInterface; + + public JKleppmannTree getTree(String name) { + return _trees.computeIfAbsent(name, this::createTree); + } + + private JKleppmannTree createTree(String name) { + return jObjectTxManager.executeTx(() -> { + var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null); + if (data == null) { + data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty()); + } + var tree = new JKleppmannTree(name); + opObjectRegistry.registerObject(tree); + return tree; + }); + } + + public class JKleppmannTree implements OpObject { + private final KleppmannTree _tree; + + private final SoftJObject _persistentData; + + private final JKleppmannTreeStorageInterface _storageInterface; + private final JKleppmannTreeClock _clock; + + private final String _treeName; + + JKleppmannTree(String treeName) { + _treeName = treeName; + + _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName)); + + _storageInterface = new JKleppmannTreeStorageInterface(); + _clock = new JKleppmannTreeClock(); + + _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder()); + } + + public String traverse(List names) { + return _tree.traverse(names); + } + + public String getNewNodeId() { + return _storageInterface.getNewNodeId(); + } + + public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) { + _tree.move(newParent, newMeta, node); + } + + public void trash(JKleppmannTreeNodeMeta newMeta, String node) { + _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node); + } + + @Override + public boolean hasPendingOpsForHost(UUID host) { + return _persistentData.get() + .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getQueues().containsKey(host) && + !d.getQueues().get(host).isEmpty() + ); + } + + @Override + public List getPendingOpsForHost(UUID host, int limit) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + if (d.getQueues().containsKey(host)) { + var queue = d.getQueues().get(host); + ArrayList collected = new ArrayList<>(); + + for (var node : queue.entrySet()) { + collected.add(new JKleppmannTreeOpWrapper(node.getValue())); + if (collected.size() >= limit) break; + } + + return collected; + } + return List.of(); + }); + } + + @Override + public String getId() { + return _treeName; + } + + @Override + public void commitOpForHost(UUID host, Op op) { + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + 
getId()); + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + + var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); + if (!Objects.equals(jop.getOp(), got)) + throw new IllegalArgumentException("Committed op push was not the oldest"); + + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getQueues().get(host).pollFirstEntry(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); + } + }); + + } + + @Override + public void pushBootstrap(UUID host) { + _tree.recordBoostrapFor(host); + } + + public Pair findParent(Function predicate) { + return _tree.findParent(predicate); + } + + @Override + public boolean acceptExternalOp(UUID from, Op op) { + if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { + return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); + } + + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); + + JObject fileRef; + if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { + var fino = f.getFileIno(); + fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); + } else { + fileRef = null; + } + + if (Log.isTraceEnabled()) + Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); + + try { + _tree.applyExternalOp(from, jop.getOp()); + } catch (Exception e) { + Log.error("Error applying external op", e); + throw e; + } finally { + // FIXME: + // Fixup the ref if it didn't really get applied + + if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) + Log.error("Could not create child of pushed op: " + jop.getOp()); + + if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { + if (fileRef != null) { + var got = jObjectManager.get(jop.getOp().childId()).orElse(null); + + VoidFn remove = () -> { + fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { + m.removeRef(jop.getOp().childId()); + }); + }; + + if (got == null) { + remove.apply(); + } else { + try { + got.rLock(); + try { + got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno())) + remove.apply(); + } finally { + got.rUnlock(); + } + } catch (DeletedObjectAccessException dex) { + remove.apply(); + } + } + } + } + } + return true; + } + + @Override + public Op getPeriodicPushOp() { + return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp()); + } + + @Override + public void addToTx() { + // FIXME: a hack + _persistentData.get().rwLockNoCopy(); + _persistentData.get().rwUnlock(); + } + + private class JOpRecorder implements OpRecorder { + @Override + public void recordOp(OpMove op) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData 
object) { + object.recordOp(hostUuds, op); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.removeOp(hostUuds, op); + } + }); + opSender.push(JKleppmannTree.this); + } + + @Override + public void recordOpForPeer(UUID peer, OpMove op) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.recordOp(peer, op); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.removeOp(peer, op); + } + }); + opSender.push(JKleppmannTree.this); + } + } + + private class JKleppmannTreeClock implements Clock { + @Override + public Long getTimestamp() { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1; + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getClock().getTimestamp(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getClock().ungetTimestamp(); + } + }); + return ret; + } + + @Override + public Long peekTimestamp() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp()); + } + + @Override + public Long updateTimestamp(Long receivedTimestamp) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + Long _old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + _old = object.getClock().updateTimestamp(receivedTimestamp); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getClock().setTimestamp(_old); + } + }); + return _persistentData.get().getData().getClock().peekTimestamp(); + } + } + + public class JKleppmannTreeStorageInterface implements StorageInterface { + private final LogWrapper _logWrapper = new LogWrapper(); + private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); + + public JKleppmannTreeStorageInterface() { + if (jObjectManager.get(getRootId()).isEmpty()) { + putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory("")))); + putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null))); + } + } + + public JObject putNode(JKleppmannTreeNode node) { + return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent())); + } + + public JObject putNodeLocked(JKleppmannTreeNode node) { + return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent())); + } + + @Override + public String getRootId() { + return _treeName + "_jt_root"; + } + + @Override + public String getTrashId() { + return _treeName + "_jt_trash"; + } + + @Override + public String getNewNodeId() { + return persistentPeerDataService.getUniqueId(); + } + + @Override + public JKleppmannTreeNodeWrapper getById(String id) { + var got = jObjectManager.get(id); + if (got.isEmpty()) return null; + return new JKleppmannTreeNodeWrapper((JObject) got.get()); + } + + @Override + public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { + return new 
JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node))); + } + + @Override + public void removeNode(String id) {} + + @Override + public LogInterface getLog() { + return _logWrapper; + } + + @Override + public PeerTimestampLogInterface getPeerTimestampLog() { + return _peerLogWrapper; + } + + @Override + public void rLock() { + _persistentData.get().rLock(); + } + + @Override + public void rUnlock() { + _persistentData.get().rUnlock(); + } + + @Override + public void rwLock() { + _persistentData.get().rwLockNoCopy(); + } + + @Override + public void rwUnlock() { + _persistentData.get().rwUnlock(); + } + + @Override + public void assertRwLock() { + _persistentData.get().assertRwLock(); + } + + private class PeerLogWrapper implements PeerTimestampLogInterface { + + @Override + public Long getForPeer(UUID peerId) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> d.getPeerTimestampLog().get(peerId)); + } + + @Override + public void putForPeer(UUID peerId, Long timestamp) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + Long old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + old = object.getPeerTimestampLog().put(peerId, timestamp); + return !Objects.equals(old, timestamp); + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + if (old != null) + object.getPeerTimestampLog().put(peerId, old); + else + object.getPeerTimestampLog().remove(peerId, timestamp); + } + }); + } + } + + private class LogWrapper implements LogInterface { + @Override + public Pair, LogRecord> peekOldest() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var ret = d.getLog().firstEntry(); + if (ret == null) return null; + return Pair.of(ret); + }); + } + + @Override + public Pair, LogRecord> takeOldest() { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + + var ret = _persistentData.get().getData().getLog().firstEntry(); + if (ret != null) + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getLog().pollFirstEntry(); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getLog().put(ret.getKey(), ret.getValue()); + } + }); + return Pair.of(ret); + } + + @Override + public Pair, LogRecord> peekNewest() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var ret = d.getLog().lastEntry(); + if (ret == null) return null; + return Pair.of(ret); + }); + } + + @Override + public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + var tail = d.getLog().tailMap(since, inclusive); + return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); + }); + } + + @Override + public List, LogRecord>> getAll() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); + }); + } + + @Override + public boolean isEmpty() { + return 
_persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().isEmpty(); + }); + } + + @Override + public boolean containsKey(CombinedTimestamp timestamp) { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return d.getLog().containsKey(timestamp); + }); + } + + @Override + public long size() { + return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + return (long) d.getLog().size(); + }); + } + + @Override + public void put(CombinedTimestamp timestamp, LogRecord record) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (_persistentData.get().getData().getLog().containsKey(timestamp)) + throw new IllegalStateException("Overwriting log entry?"); + _persistentData.get().mutate(new JMutator() { + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + object.getLog().put(timestamp, record); + return true; + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + object.getLog().remove(timestamp, record); + } + }); + } + + @Override + public void replace(CombinedTimestamp timestamp, LogRecord record) { + _persistentData.get().assertRwLock(); + _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + _persistentData.get().mutate(new JMutator() { + LogRecord old; + + @Override + public boolean mutate(JKleppmannTreePersistentData object) { + old = object.getLog().put(timestamp, record); + return !Objects.equals(old, record); + } + + @Override + public void revert(JKleppmannTreePersistentData object) { + if (old != null) + object.getLog().put(timestamp, old); + else + object.getLog().remove(timestamp, record); + } + }); + } + } + } + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java new file mode 100644 index 00000000..cd4b09c9 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java @@ -0,0 +1,71 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jrepository.JObject; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.kleppmanntree.TreeNode; +import com.usatiuk.kleppmanntree.TreeNodeWrapper; + +import java.util.UUID; + +public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper { + private final JObject _backing; + + public JKleppmannTreeNodeWrapper(JObject backing) {_backing = backing;} + + @Override + public void rLock() { + _backing.rLock(); + } + + @Override + public void rUnlock() { + _backing.rUnlock(); + } + + @Override + public void rwLock() { + _backing.rwLock(); + } + + @Override + public void rwUnlock() { + _backing.bumpVer(); // FIXME:? 
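+        // Note: bumps the version unconditionally, even if the write lock holder changed nothing.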
+ _backing.rwUnlock(); + } + + @Override + public void freeze() { + _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { + m.freeze(); + return null; + }); + } + + @Override + public void unfreeze() { + _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { + m.unfreeze(); + return null; + }); + } + + @Override + public void notifyRef(String id) { + _backing.getMeta().addRef(id); + } + + @Override + public void notifyRmRef(String id) { + _backing.getMeta().removeRef(id); + } + + @Override + public TreeNode getNode() { + _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); + if (_backing.getData() == null) + throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!"); + return _backing.getData().getNode(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java new file mode 100644 index 00000000..4612f8fc --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -0,0 +1,30 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import com.usatiuk.kleppmanntree.OpMove; +import lombok.Getter; + +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +// Wrapper to avoid having to specify generic types +public class JKleppmannTreeOpWrapper implements Op { + @Getter + private final OpMove _op; + + public JKleppmannTreeOpWrapper(OpMove op) { + if (op == null) throw new IllegalArgumentException("op shouldn't be null"); + _op = op; + } + + @Override + public Collection getEscapedRefs() { + if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { + return List.of(mf.getFileIno()); + } + return List.of(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java new file mode 100644 index 00000000..39b5d484 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.kleppmanntree.PeerInterface; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; + +import java.util.Collection; +import java.util.UUID; + +@Singleton +public class JKleppmannTreePeerInterface implements PeerInterface { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Override + public UUID getSelfId() { + return persistentPeerDataService.getSelfUuid(); + } + + @Override + public Collection getAllPeers() { + return persistentPeerDataService.getHostUuidsAndSelf(); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java new file mode 100644 index 00000000..3c84d067 --- /dev/null +++ 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree; + +import com.usatiuk.dhfs.objects.repository.opsupport.Op; +import lombok.Getter; + +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +public class JKleppmannTreePeriodicPushOp implements Op { + @Getter + private final UUID _from; + @Getter + private final long _timestamp; + + public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { + _from = from; + _timestamp = timestamp; + } + + @Override + public Collection getEscapedRefs() { + return List.of(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java new file mode 100644 index 00000000..0146da88 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -0,0 +1,45 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import com.usatiuk.kleppmanntree.TreeNode; +import lombok.Getter; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +// FIXME: Ideally this is two classes? +@OnlyLocal +public class JKleppmannTreeNode extends JObjectData { + @Getter + final TreeNode _node; + + public JKleppmannTreeNode(TreeNode node) { + _node = node; + } + + @Override + public String getName() { + return _node.getId(); + } + + @Override + public Class getConflictResolver() { + return null; + } + + @Override + public Collection extractRefs() { + if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile) + return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno()); + return Collections.unmodifiableCollection(_node.getChildren().values()); + } + + @Override + public Class getRefType() { + return JObjectData.class; + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java new file mode 100644 index 00000000..2ea7d27f --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -0,0 +1,31 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP; +import com.usatiuk.kleppmanntree.NodeMeta; +import lombok.Getter; + +import java.util.Objects; + +@ProtoMirror(JKleppmannTreeNodeMetaP.class) +public abstract class JKleppmannTreeNodeMeta implements NodeMeta { + @Getter + private final String _name; + + public JKleppmannTreeNodeMeta(String name) {_name = name;} + + public abstract JKleppmannTreeNodeMeta withName(String name); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JKleppmannTreeNodeMeta that = (JKleppmannTreeNodeMeta) o; + return Objects.equals(_name, that._name); + } + + @Override + public int hashCode() { + return Objects.hashCode(_name); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java new file mode 100644 index 00000000..79882017 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java @@ -0,0 +1,16 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import 
com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP; + +@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class) +public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta { + public JKleppmannTreeNodeMetaDirectory(String name) { + super(name); + } + + @Override + public JKleppmannTreeNodeMeta withName(String name) { + return new JKleppmannTreeNodeMetaDirectory(name); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java new file mode 100644 index 00000000..124cd51d --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -0,0 +1,37 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; +import lombok.Getter; + +import java.util.Objects; + +@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) +public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { + @Getter + private final String _fileIno; + + public JKleppmannTreeNodeMetaFile(String name, String fileIno) { + super(name); + _fileIno = fileIno; + } + + @Override + public JKleppmannTreeNodeMeta withName(String name) { + return new JKleppmannTreeNodeMetaFile(name, _fileIno); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + JKleppmannTreeNodeMetaFile that = (JKleppmannTreeNodeMetaFile) o; + return Objects.equals(_fileIno, that._fileIno); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), _fileIno); + } +} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java new file mode 100644 index 00000000..d6881d5b --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -0,0 +1,88 @@ +package com.usatiuk.dhfs.objects.jkleppmanntree.structs; + +import com.usatiuk.dhfs.objects.jrepository.JObjectData; +import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; +import com.usatiuk.dhfs.objects.repository.ConflictResolver; +import com.usatiuk.kleppmanntree.AtomicClock; +import com.usatiuk.kleppmanntree.CombinedTimestamp; +import com.usatiuk.kleppmanntree.LogRecord; +import com.usatiuk.kleppmanntree.OpMove; +import lombok.Getter; + +import java.util.*; + +@OnlyLocal +public class JKleppmannTreePersistentData extends JObjectData { + private final String _treeName; + @Getter + private final AtomicClock _clock; + @Getter + private final HashMap, OpMove>> _queues; + @Getter + private final HashMap _peerTimestampLog; + @Getter + private final TreeMap, LogRecord> _log; + + public JKleppmannTreePersistentData(String treeName, AtomicClock clock, + HashMap, OpMove>> queues, + HashMap peerTimestampLog, TreeMap, LogRecord> log) { + _treeName = treeName; + _clock = clock; + _queues = queues; + _peerTimestampLog = peerTimestampLog; + _log = log; + } + + public JKleppmannTreePersistentData(String treeName) { + _treeName = 
treeName; + _clock = new AtomicClock(1); + _queues = new HashMap<>(); + _peerTimestampLog = new HashMap<>(); + _log = new TreeMap<>(); + } + + public static String nameFromTreeName(String treeName) { + return treeName + "_pd"; + } + + public void recordOp(UUID host, OpMove opMove) { + _queues.computeIfAbsent(host, h -> new TreeMap<>()); + _queues.get(host).put(opMove.timestamp(), opMove); + } + + public void removeOp(UUID host, OpMove opMove) { + _queues.get(host).remove(opMove.timestamp(), opMove); + } + + public void recordOp(Collection hosts, OpMove opMove) { + for (var u : hosts) { + recordOp(u, opMove); + } + } + + public void removeOp(Collection hosts, OpMove opMove) { + for (var u : hosts) { + removeOp(u, opMove); + } + } + + + @Override + public String getName() { + return nameFromTreeName(_treeName); + } + + public String getTreeName() { + return _treeName; + } + + @Override + public Class getConflictResolver() { + return null; + } + + @Override + public Collection extractRefs() { + return List.of(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java 
b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java similarity index 100% 
rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java rename to 
dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java similarity index 100% rename from dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java rename to dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java new file mode 100644 index 00000000..2f285c42 --- /dev/null +++ b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java @@ -0,0 +1,54 @@ +package com.usatiuk.dhfs.webui; + +import io.quarkus.runtime.StartupEvent; +import io.vertx.core.http.HttpServerRequest; +import io.vertx.ext.web.Router; +import io.vertx.ext.web.RoutingContext; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Optional; + +@ApplicationScoped +public class WebUiRouter { + + @ConfigProperty(name = "dhfs.webui.root") + Optional root; + + void installRoute(@Observes StartupEvent startupEvent, Router router) { + root.ifPresent(r -> { + router.route().path("/").handler(ctx -> ctx.redirect("/webui")); + router.route() + .path("/webui/*") + .handler(this::handle); + }); + } + + public void handle(RoutingContext event) { + var indexHtml = Paths.get(root.orElseThrow(() -> new IllegalStateException("Web ui root not set but handler called")), "index.html").toString(); + + HttpServerRequest request = event.request(); + String requestedPath = Path.of(event.currentRoute().getPath()).relativize(Path.of(event.normalizedPath())).toString(); + + if ("/".equals(requestedPath)) { + request.response().sendFile(indexHtml); + return; + } + + Path requested = Paths.get(root.get(), requestedPath); + if (!requested.normalize().startsWith(Paths.get(root.get()))) { + request.response().setStatusCode(404).end(); + return; + } + + event.vertx().fileSystem().lprops(requested.toString(), exists -> { + if (exists.succeeded() && exists.result().isRegularFile()) + request.response().sendFile(requested.toString()); + else + request.response().sendFile(indexHtml); + }); + } +} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto new file mode 100644 index 00000000..a1bc1866 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.repository.peerdiscovery"; +option java_outer_classname = "DhfsObjectPeerDiscoveryApi"; + +package dhfs.objects.peerdiscovery; + 
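+// Added note, not in the original patch: judging by the class names elsewhere in
+// this commit (LocalPeerDiscoveryBroadcaster/LocalPeerDiscoveryClient) and the
+// dhfs.objects.peerdiscovery.port setting, this message is presumably what peers
+// broadcast on the local network: uuid identifies the advertising peer, and
+// port/securePort are assumed to be its plaintext and TLS endpoints.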
+message PeerDiscoveryInfo { + string uuid = 1; + uint32 port = 2; + uint32 securePort = 3; +} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto new file mode 100644 index 00000000..0f93fdd5 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto @@ -0,0 +1,155 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.persistence"; +option java_outer_classname = "DhfsObjectPersistence"; + +package dhfs.objects.persistence; + +message ObjectMetadataP { + string name = 1; + map remoteCopies = 2; + string knownClass = 3; + bool seen = 4; + bool deleted = 5; + repeated string confirmedDeletes = 6; + repeated string referrers = 7; + map changelog = 8; + repeated string savedRefs = 9; + bool frozen = 10; + bool haveLocalCopy = 11; +} + +message FsNodeP { + string uuid = 1; + int64 mode = 2; + int64 ctime = 3; + int64 mtime = 4; +} + +message FilePChunksEntry { + int64 start = 1; + string id = 2; +} + +message FileP { + FsNodeP fsNode = 1; + repeated FilePChunksEntry chunks = 2; + bool symlink = 3; + int64 size = 4; +} + +message DirectoryP { + FsNodeP fsNode = 1; + map children = 2; +} + +message ChunkDataP { + string name = 1; + bytes data = 2; +} + +message PeerDirectoryP { + repeated string peers = 1; +} + +message PersistentPeerInfoP { + string uuid = 1; + bytes cert = 2; +} + +message JKleppmannTreeNodeMetaFileP { + string name = 1; + string fileIno = 2; +} + +message JKleppmannTreeNodeMetaDirectoryP { + string name = 1; +} + +message JKleppmannTreeNodeMetaP { + oneof meta { + JKleppmannTreeNodeMetaFileP jKleppmannTreeNodeMetaFile = 1; + JKleppmannTreeNodeMetaDirectoryP jKleppmannTreeNodeMetaDirectory = 2; + } +} + +message JKleppmannTreeOpP { + int64 timestamp = 1; + string peer = 2; + string newParentId = 3; + JKleppmannTreeNodeMetaP meta = 4; + string child = 5; +} + +message JKleppmannTreeNodePChildrenEntry { + string key = 1; + string value = 2; +} + +message JKleppmannTreeNodeP { + optional string parent = 1; + string id = 2; + repeated JKleppmannTreeNodePChildrenEntry children = 3; + optional JKleppmannTreeNodeMetaP meta = 4; + optional JKleppmannTreeOpP lastEffectiveOp = 5; +} + +message JKleppmannTreePersistentDataPQueueEntry { + int64 clock = 1; + string uuid = 2; + JKleppmannTreeOpP op = 3; +} + +message JKleppmannTreePersistentDataPQueue { + string node = 1; + repeated JKleppmannTreePersistentDataPQueueEntry entries = 2; +} + +message JKleppmannTreePersistentDataPTimestampEntry { + string host = 1; + int64 timestamp = 2; +} + +message JKleppmannTreeOpLogEffectP { + optional JKleppmannTreeOpP oldEffectiveMove = 1; + optional string oldParent = 2; + optional JKleppmannTreeNodeMetaP oldMeta = 3; + JKleppmannTreeOpP effectiveOp = 4; + string newParentId = 5; + JKleppmannTreeNodeMetaP newMeta = 6; + string selfId = 7; +} + +message JKleppmannTreeOpLogPEntry { + int64 clock = 1; + string uuid = 2; + JKleppmannTreeOpP op = 3; + repeated JKleppmannTreeOpLogEffectP effects = 4; +} + +message JKleppmannTreePersistentDataP { + string treeName = 1; + int64 clock = 2; + repeated JKleppmannTreePersistentDataPQueue queues = 3; + repeated JKleppmannTreePersistentDataPTimestampEntry peerLog = 4; + repeated JKleppmannTreeOpLogPEntry opLog = 5; +} + +message PeerDirectoryLocalP { + repeated string initialOpSyncDonePeers = 1; + repeated string initialObjSyncDonePeers = 2; +} + +message JObjectDataP { + oneof obj { + FileP file = 
2; + DirectoryP directory = 3; + ChunkDataP chunkData = 5; + PeerDirectoryP peerDirectory = 6; + PersistentPeerInfoP persistentPeerInfo = 7; + JKleppmannTreeNodeP jKleppmannTreeNode = 8; + JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; + PeerDirectoryLocalP peerDirectoryLocal = 10; + } +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto new file mode 100644 index 00000000..8ef94946 --- /dev/null +++ b/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +import "dhfs_objects_serial.proto"; + +option java_multiple_files = true; +option java_package = "com.usatiuk.dhfs.objects.repository"; +option java_outer_classname = "DhfsObjectSyncApi"; + +package dhfs.objects.sync; + +service DhfsObjectSyncGrpc { + rpc GetObject (GetObjectRequest) returns (GetObjectReply) {} + rpc CanDelete (CanDeleteRequest) returns (CanDeleteReply) {} + rpc IndexUpdate (IndexUpdatePush) returns (IndexUpdateReply) {} + rpc OpPush (OpPushMsg) returns (OpPushReply) {} + + rpc Ping (PingRequest) returns (PingReply) {} +} + +message PingRequest { + string selfUuid = 1; +} + +message PingReply { + string selfUuid = 1; +} + +message ObjectChangelogEntry { + string host = 1; + uint64 version = 2; +} + +message ObjectChangelog { + repeated ObjectChangelogEntry entries = 1; +} + +message ObjectHeader { + string name = 2; + ObjectChangelog changelog = 5; + optional dhfs.objects.persistence.JObjectDataP pushedData = 6; +} + +message ApiObject { + ObjectHeader header = 1; + dhfs.objects.persistence.JObjectDataP content = 2; +} + +message GetObjectRequest { + string selfUuid = 10; + + string name = 2; +} + +message GetObjectReply { + string selfUuid = 10; + + ApiObject object = 1; +} + +message CanDeleteRequest { + string selfUuid = 10; + + string name = 2; + repeated string ourReferrers = 3; +} + +message CanDeleteReply { + string selfUuid = 10; + string objName = 1; + bool deletionCandidate = 2; + repeated string referrers = 3; +} + +message IndexUpdatePush { + string selfUuid = 10; + + ObjectHeader header = 1; +} + +message IndexUpdateReply {} + +message JKleppmannTreePeriodicPushOpP { + string fromUuid = 1; + int64 timestamp = 2; +} + +message OpPushPayload { + oneof payload { + dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; + JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; + } +} + +message OpPushMsg { + string selfUuid = 10; + string queueId = 1; + repeated OpPushPayload msg = 2; +} + +message OpPushReply { + +} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/resources/application.properties b/dhfs-parent/server-old/src/main/resources/application.properties new file mode 100644 index 00000000..8309619c --- /dev/null +++ b/dhfs-parent/server-old/src/main/resources/application.properties @@ -0,0 +1,46 @@ +quarkus.grpc.server.use-separate-server=false +dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs +dhfs.objects.root=${HOME}/dhfs_default/data/stuff +dhfs.objects.peerdiscovery.port=42069 +dhfs.objects.peerdiscovery.interval=5000 +dhfs.objects.sync.timeout=30 +dhfs.objects.sync.ping.timeout=5 +dhfs.objects.invalidation.threads=4 +dhfs.objects.invalidation.delay=1000 +dhfs.objects.reconnect_interval=5s +dhfs.objects.write_log=false +dhfs.objects.periodic-push-op-interval=5m +dhfs.fuse.root=${HOME}/dhfs_default/fuse +dhfs.fuse.debug=false +dhfs.fuse.enabled=true 
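+# Added worked example for the chunk-merge settings below (not part of the
+# original config; it assumes the thresholds are multipliers of
+# target_chunk_size, which the key names suggest but the patch does not state):
+# with target_chunk_size=2097152 (2 MiB) and write_merge_threshold=0.8, writes
+# under roughly 1.6 MiB attempt to merge with neighbouring blocks, and
+# write_merge_limit=1.2 aborts any merge whose result would exceed about 2.4 MiB.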
+dhfs.files.allow_recursive_delete=false
+dhfs.files.target_chunk_size=2097152
+# Writes strictly smaller than this will try to merge with blocks nearby
+dhfs.files.write_merge_threshold=0.8
+# If a merge would result in a block of greater size than this, stop merging
+dhfs.files.write_merge_limit=1.2
+# Don't take blocks of this size and above when merging
+dhfs.files.write_merge_max_chunk_to_take=1
+dhfs.files.write_last_chunk_limit=1.5
+dhfs.objects.writeback.delay=100
+dhfs.objects.writeback.limit=134217728
+dhfs.objects.lru.limit=134217728
+dhfs.objects.lru.print-stats=false
+dhfs.objects.writeback.watermark-high=0.6
+dhfs.objects.writeback.watermark-low=0.4
+dhfs.objects.writeback.threads=4
+dhfs.objects.deletion.delay=1000
+dhfs.objects.deletion.can-delete-retry-delay=10000
+dhfs.objects.ref_verification=true
+dhfs.files.use_hash_for_chunks=false
+dhfs.objects.autosync.threads=2
+dhfs.objects.autosync.download-all=false
+dhfs.objects.move-processor.threads=4
+dhfs.objects.ref-processor.threads=4
+dhfs.objects.opsender.batch-size=100
+dhfs.objects.lock_timeout_secs=15
+dhfs.local-discovery=true
+quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
+quarkus.log.category."com.usatiuk.dhfs".level=TRACE
+quarkus.http.insecure-requests=enabled
+quarkus.http.ssl.client-auth=required
diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java
new file mode 100644
index 00000000..03f74be5
--- /dev/null
+++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java
@@ -0,0 +1,29 @@
+package com.usatiuk.dhfs;
+
+import io.quarkus.test.junit.QuarkusTestProfile;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+abstract public class TempDataProfile implements QuarkusTestProfile {
+    protected void getConfigOverrides(Map<String, String> toPut) {}
+
+    @Override
+    final public Map<String, String> getConfigOverrides() {
+        Path tempDirWithPrefix;
+        try {
+            tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        var ret = new HashMap<String, String>();
+        ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
+        ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString());
+        ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
+        getConfigOverrides(ret);
+        return ret;
+    }
+}
diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java
new file mode 100644
index 00000000..2a6979a6
--- /dev/null
+++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java
@@ -0,0 +1,44 @@
+package com.usatiuk.dhfs;
+
+import io.quarkus.logging.Log;
+import io.quarkus.runtime.ShutdownEvent;
+import io.quarkus.runtime.StartupEvent;
+import jakarta.annotation.Priority;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.enterprise.event.Observes;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Objects;
+
+@ApplicationScoped
+public class TestDataCleaner {
+    @ConfigProperty(name = "dhfs.objects.persistence.files.root")
+    String tempDirectory;
+    @ConfigProperty(name = "dhfs.objects.root")
+    String tempDirectoryIdx;
+
+    void
init(@Observes @Priority(1) StartupEvent event) throws IOException { + try { + purgeDirectory(Path.of(tempDirectory).toFile()); + purgeDirectory(Path.of(tempDirectoryIdx).toFile()); + } catch (Exception ignored) { + Log.warn("Couldn't cleanup test data on init"); + } + } + + void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { + purgeDirectory(Path.of(tempDirectory).toFile()); + purgeDirectory(Path.of(tempDirectoryIdx).toFile()); + } + + void purgeDirectory(File dir) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + if (file.isDirectory()) + purgeDirectory(file); + file.delete(); + } + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java new file mode 100644 index 00000000..86ad0fb3 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java @@ -0,0 +1,83 @@ +package com.usatiuk.dhfs.benchmarks; + +import io.quarkus.logging.Log; +import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; + +import java.util.Arrays; +import java.util.function.Supplier; + +public class Benchmarker { + static long[] runLatency(Supplier fn, int iterations) { + var out = new long[iterations]; + + int hash = 1; + + for (int i = 0; i < iterations; i++) { + long startNanos = System.nanoTime(); + var cur = fn.get(); + long stopNanos = System.nanoTime(); + out[i] = stopNanos - startNanos; + hash = hash * 31 + cur.hashCode(); + } + + System.out.println("\nHash: " + hash); + + return out; + } + + static long[] runThroughput(Supplier fn, int iterations, long iterationTime) { + var out = new long[iterations]; + + int hash = 1; + + for (int i = 0; i < iterations; i++) { + long startMillis = System.currentTimeMillis(); + long count = 0; + // FIXME: That's probably janky + while (System.currentTimeMillis() - startMillis < iterationTime) { + var res = fn.get(); + count++; + hash = hash * 31 + res.hashCode(); + } + System.out.println("Ran iteration " + i + "/" + iterations + " count=" + count); + out[i] = count; + } + + System.out.println("\nHash: " + hash); + + return out; + } + + static void printStats(double[] data, String unit) { + DescriptiveStatistics stats = new DescriptiveStatistics(); + for (var r : data) { + stats.addValue(r); + } + Log.info("\n" + stats + + "\n 50%: " + stats.getPercentile(50) + " " + unit + + "\n 90%: " + stats.getPercentile(90) + " " + unit + + "\n 95%: " + stats.getPercentile(95) + " " + unit + + "\n 99%: " + stats.getPercentile(99) + " " + unit + + "\n 99.9%: " + stats.getPercentile(99.9) + " " + unit + + "\n 99.99%: " + stats.getPercentile(99.99) + " " + unit + ); + + } + + static void runAndPrintMixSimple(String name, Supplier fn, int latencyIterations, int thrptIterations, int thrptIterationTime, int warmupIterations, int warmupIterationTime) { + System.out.println("\n=========\n" + "Running " + name + "\n=========\n"); + System.out.println("==Warmup=="); + runThroughput(fn, warmupIterations, warmupIterationTime); + System.out.println("==Warmup done=="); + System.out.println("==Throughput=="); + var thrpt = runThroughput(fn, thrptIterations, thrptIterationTime); + printStats(Arrays.stream(thrpt).mapToDouble(o -> (double) o / 1000).toArray(), "ops/s"); + System.out.println("==Throughput done=="); + System.out.println("==Latency=="); + var lat = runLatency(fn, latencyIterations); + printStats(Arrays.stream(lat).mapToDouble(o -> (double) o).toArray(), "ns/op"); 
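+        // Added observation, not in the original: runThroughput returns raw op
+        // counts per window of thrptIterationTime milliseconds, so the division
+        // by 1000 in the throughput printStats call earlier in this method only
+        // matches the "ops/s" label for one specific window length; the absolute
+        // throughput figures are best read as relative numbers.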
+ System.out.println("==Latency done=="); + System.out.println("\n=========\n" + name + " done" + "\n=========\n"); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java new file mode 100644 index 00000000..96acf3f5 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java @@ -0,0 +1,52 @@ +package com.usatiuk.dhfs.benchmarks; + +import com.google.protobuf.UnsafeByteOperations; +import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.util.Map; + +class Profiles { + public static class DhfsFuseTestProfile extends TempDataProfile { + @Override + protected void getConfigOverrides(Map ret) { + ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "INFO"); + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.objects.ref_verification", "false"); + } + } +} + +@QuarkusTest +@TestProfile(Profiles.DhfsFuseTestProfile.class) +public class DhfsFileBenchmarkTest { + @Inject + DhfsFileService dhfsFileService; + + @Test + @Disabled + void openRootTest() { + Benchmarker.runAndPrintMixSimple("dhfsFileService.open(\"\")", + () -> { + return dhfsFileService.open(""); + }, 1_000_000, 5, 1000, 5, 1000); + } + + @Test + @Disabled + void writeMbTest() { + String file = dhfsFileService.create("/writeMbTest", 0777).get(); + var bb = ByteBuffer.allocateDirect(1024 * 1024); + Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")", + () -> { + var thing = UnsafeByteOperations.unsafeWrap(bb); + return dhfsFileService.write(file, dhfsFileService.size(file), thing); + }, 1_000, 10, 100, 1, 100); + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java new file mode 100644 index 00000000..93cc42b8 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfile.class) +public class DhfsFileServiceSimpleTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java new file mode 100644 index 00000000..8bea5c7e --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -0,0 +1,288 @@ +package com.usatiuk.dhfs.files; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.files.objects.ChunkData; +import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; +import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +import 
com.usatiuk.kleppmanntree.AlreadyExistsException; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.awaitility.Awaitility.await; + +class Profiles { + public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile { + @Override + protected void getConfigOverrides(Map ret) { + ret.put("dhfs.fuse.enabled", "false"); + } + } + + public static class DhfsFileServiceSimpleTestProfileNoChunking extends TempDataProfile { + @Override + protected void getConfigOverrides(Map ret) { + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.files.target_chunk_size", "-1"); + } + } + + public static class DhfsFileServiceSimpleTestProfileSmallChunking extends TempDataProfile { + @Override + protected void getConfigOverrides(Map ret) { + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.files.target_chunk_size", "3"); + } + } +} + +public class DhfsFileServiceSimpleTestImpl { + @Inject + DhfsFileService fileService; + @Inject + JObjectManager jObjectManager; + @Inject + JObjectTxManager jObjectTxManager; + + @Test + void readTest() { + var fuuid = UUID.randomUUID(); + { + ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); + ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); + ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); + File f = new File(fuuid, 777, false); + f.getChunks().put(0L, c1.getName()); + f.getChunks().put((long) c1.getBytes().size(), c2.getName()); + f.getChunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); + + // FIXME: dhfs_files + + var c1o = new AtomicReference(); + var c2o = new AtomicReference(); + var c3o = new AtomicReference(); + var fo = new AtomicReference(); + + jObjectTxManager.executeTx(() -> { + c1o.set(jObjectManager.put(c1, Optional.of(f.getName())).getMeta().getName()); + c2o.set(jObjectManager.put(c2, Optional.of(f.getName())).getMeta().getName()); + c3o.set(jObjectManager.put(c3, Optional.of(f.getName())).getMeta().getName()); + fo.set(jObjectManager.put(f, Optional.empty()).getMeta().getName()); + }); + + var all = jObjectManager.findAll(); + Assertions.assertTrue(all.contains(c1o.get())); + Assertions.assertTrue(all.contains(c2o.get())); + Assertions.assertTrue(all.contains(c3o.get())); + Assertions.assertTrue(all.contains(fo.get())); + } + + String all = "1234567891011"; + + { + for (int start = 0; start < all.length(); start++) { + for (int end = start; end <= all.length(); end++) { + var read = fileService.read(fuuid.toString(), start, end - start); + Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); + } + } + } + } + + @Test + void dontMkdirTwiceTest() { + Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + } + + @Test + void writeTest() { + var ret = fileService.create("/writeTest", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 4, new byte[]{10, 11, 12}); + 
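+        // (Added note) the three bytes land at offsets 4..6, so the read below
+        // sees them spliced into the middle of the original ten-byte run.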
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 10, new byte[]{13, 14}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + fileService.write(uuid, 6, new byte[]{15, 16}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + fileService.write(uuid, 3, new byte[]{17, 18}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + } + + @Test + void removeTest() { + var ret = fileService.create("/removeTest", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.unlink("/removeTest"); + Assertions.assertFalse(fileService.open("/removeTest").isPresent()); + } + + @Test + void truncateTest1() { + var ret = fileService.create("/truncateTest1", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 20); + fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void truncateTest2() { + var ret = fileService.create("/truncateTest2", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 20); + fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void truncateTest3() { + var ret = fileService.create("/truncateTest3", 777); + Assertions.assertTrue(ret.isPresent()); + + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + fileService.truncate(uuid, 7); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray()); + } + + @Test + void moveTest() { + var ret = fileService.create("/moveTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest")); + Assertions.assertFalse(fileService.open("/moveTest").isPresent()); + Assertions.assertTrue(fileService.open("/movedTest").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 
1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray()); + } + + @Test + void moveOverTest() throws InterruptedException { + var ret = fileService.create("/moveOverTest1", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + var ret2 = fileService.create("/moveOverTest2", 777); + Assertions.assertTrue(ret2.isPresent()); + var uuid2 = ret2.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}); + Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray()); + + var oldfile = jObjectManager.get(ret2.get()).orElseThrow(IllegalStateException::new); + var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); + var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); + Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent()); + Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray()); + + await().atMost(5, TimeUnit.SECONDS).until(() -> { + try { + return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, + (m, d) -> !m.getReferrers().contains(uuid)); + } catch (DeletedObjectAccessException ignored) { + return true; + } + }); + } + + @Test + void readOverSizeTest() { + var ret = fileService.create("/readOverSizeTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray()); + } + + @Test + void writeOverSizeTest() { + var ret = fileService.create("/writeOverSizeTest", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); + Assertions.assertArrayEquals(new byte[]{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 + }, fileService.read(uuid, 0, 30).get().toByteArray()); + } + + @Test + void moveTest2() throws InterruptedException { + var ret = fileService.create("/moveTest2", 777); + Assertions.assertTrue(ret.isPresent()); + var uuid = ret.get(); + + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + + var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); + var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); + var chunkObj = 
jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { + Assertions.assertTrue(m.getReferrers().contains(uuid)); + }); + + Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2")); + Assertions.assertFalse(fileService.open("/moveTest2").isPresent()); + Assertions.assertTrue(fileService.open("/movedTest2").isPresent()); + + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray()); + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java new file mode 100644 index 00000000..5aab68e4 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileNoChunking.class) +public class DhfsFileServiceSimpleTestNoChunkingTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java new file mode 100644 index 00000000..2d9fdd78 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.files; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileSmallChunking.class) +public class DhfsFileServiceSimpleTestSmallChunkingTest extends DhfsFileServiceSimpleTestImpl { +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java new file mode 100644 index 00000000..df800321 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java @@ -0,0 +1,77 @@ +package com.usatiuk.dhfs.fuse; + +import com.usatiuk.dhfs.TempDataProfile; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +class Profiles { + public static class DhfsFuseTestProfile extends TempDataProfile { + } +} + +@QuarkusTest +@TestProfile(Profiles.DhfsFuseTestProfile.class) +public class DhfsFuseTest { + @ConfigProperty(name = "dhfs.fuse.root") + String root; + + @Test + void readWriteFileTest() throws IOException, InterruptedException { + byte[] testString = "test file thing".getBytes(); + Path testPath = Path.of(root).resolve("test1"); + + Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); + Assertions.assertDoesNotThrow(() -> Files.write(testPath, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); + Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); + } + + @Test + void symlinkTest() throws IOException, 
InterruptedException { + byte[] testString = "symlinkedfile".getBytes(); + Path testPath = Path.of(root).resolve("symlinktarget"); + Path testSymlink = Path.of(root).resolve("symlinktest"); + + Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); + Assertions.assertDoesNotThrow(() -> Files.write(testPath, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); + Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); + + Assertions.assertDoesNotThrow(() -> Files.createSymbolicLink(testSymlink, testPath)); + Assertions.assertTrue(() -> Files.isSymbolicLink(testSymlink)); + Assertions.assertEquals(testPath, Files.readSymbolicLink(testSymlink)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testSymlink)); + Assertions.assertArrayEquals(Files.readAllBytes(testSymlink), testString); + } + + @Test + void dontRemoveEmptyDirTest() throws IOException { + byte[] testString = "dontRemoveEmptyDirTestStr".getBytes(); + Path testDir = Path.of(root).resolve("dontRemoveEmptyDirTestDir"); + Path testFile = testDir.resolve("dontRemoveEmptyDirTestFile"); + + Assertions.assertDoesNotThrow(() -> Files.createDirectory(testDir)); + Assertions.assertDoesNotThrow(() -> Files.createFile(testFile)); + Assertions.assertDoesNotThrow(() -> Files.write(testFile, testString)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); + Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); + + Assertions.assertThrows(Exception.class, () -> Files.delete(testDir)); + Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); + Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); + + Assertions.assertDoesNotThrow(() -> Files.delete(testFile)); + Assertions.assertDoesNotThrow(() -> Files.delete(testDir)); + Assertions.assertFalse(Files.exists(testDir)); + Assertions.assertFalse(Files.exists(testFile)); + Assertions.assertThrows(Exception.class, () -> Files.readAllBytes(testFile)); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java new file mode 100644 index 00000000..b9d9f92d --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java @@ -0,0 +1,352 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class DhfsFuseIT { + GenericContainer container1; + GenericContainer container2; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + + String c1uuid; + String c2uuid; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + Network network = Network.newNetwork(); + 
container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + Stream.of(container1, container2).parallel().forEach(GenericContainer::start); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); + waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + } + + @AfterEach + void stop() { + Stream.of(container1, container2).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + 
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void createDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode()); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + } + + @Test + void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + // TODO: How this fits with the tree? 
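+ // Removing a file while the only other peer is paused: the "Delaying deletion check" message + // confirms that deletion is deferred, and the objects are dropped from persistent storage on + // both peers only after the connection is re-established.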
+ @Test + @Disabled + void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1); + + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + } + + @Test + void deleteTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> + "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + Log.info("Deleting"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + Log.info("Deleted"); + + // FIXME? 
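+ // wait until both peers log the deletion from persistent storage before checking that the file is gone everywhere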
+ waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); + + await().atMost(45, TimeUnit.SECONDS).until(() -> + 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> + 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); + } + + @Test + void moveFileTest() throws IOException, InterruptedException, TimeoutException { + Log.info("Creating"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Moving"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode()); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Reading"); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); + } + + @Test + void moveDirTest() throws IOException, InterruptedException, TimeoutException { + Log.info("Creating"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout())); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Moving"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode()); + Log.info("Listing"); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); + Log.info("Reading"); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout())); + } + + + // TODO: This probably shouldn't be working right now + @Test + void removeAddHostTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == 
container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request DELETE " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode()); + + Log.info("Re-adding"); + container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + Log.info("Listing removeAddHostTest"); + var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); + var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); + Log.info(cat1); + Log.info(cat2); + Log.info(ls1); + Log.info(ls2); + + return cat1.getStdout().contains("jioadsd") && cat1.getStdout().contains("asvdkljm") && cat1.getStdout().contains("dfgvh") && cat1.getStdout().contains("dscfg") + && cat2.getStdout().contains("jioadsd") && cat2.getStdout().contains("asvdkljm") && cat2.getStdout().contains("dfgvh") && cat2.getStdout().contains("dscfg"); + }); + } + + @Test + void dirConflictTest() throws IOException, InterruptedException, 
TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"), + Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> { + try { + return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).anyMatch(r -> r != 0); + Assumptions.assumeTrue(!createFail, "Failed creating one or more files"); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + return cat.getStdout().contains("test1") && cat.getStdout().contains("test2"); + }); + } + + @Test + void dirCycleTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;"); + return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou"); + }); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/a /root/dhfs_default/fuse/b").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/b /root/dhfs_default/fuse/a").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + Log.info("Listing dirCycleTest"); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); + Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); + Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); + 
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); + Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); + + var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); + Log.info(c1ls2); + var c2ls2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); + Log.info(c2ls2); + + return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou") + && c2ls2.getStdout().contains("xqr489") && c2ls2.getStdout().contains("ahinou") + && c1ls2.getExitCode() == 0 && c2ls2.getExitCode() == 0; + }); + + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java new file mode 100644 index 00000000..b401b053 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java @@ -0,0 +1,293 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import io.quarkus.logging.Log; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class DhfsFusex3IT { + GenericContainer container1; + GenericContainer container2; + GenericContainer container3; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + WaitingConsumer waitingConsumer3; + + String c1uuid; + String c2uuid; + String c3uuid; + + // This calculation is somewhat racy, so keep it hardcoded for now + long emptyFileCount = 9; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + // TODO: Dedup + Network network = Network.newNetwork(); + + container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container3 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + + Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start); + +
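// each peer writes a self-generated UUID into its data directory on first start; read them back here to register the peers with each other through the management API below +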
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Log.info(container1.getContainerId() + "=" + c1uuid); + Log.info(container2.getContainerId() + "=" + c2uuid); + Log.info(container3.getContainerId() + "=" + c3uuid); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c1uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); + waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c2uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + waitingConsumer3 = new WaitingConsumer(); + var loggingConsumer3 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) + .withPrefix(c3uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); + container3.followOutput(loggingConsumer3.andThen(waitingConsumer3)); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid)); + + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl1 = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl3 = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c3uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c3curl = container3.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + } + + private boolean checkEmpty() throws IOException, InterruptedException { + for (var container : List.of(container1, container2, container3)) { + var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f"); + var foundWc = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs 
-type f | wc -l"); + Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString()); + if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount)) + return false; + } + return true; + } + + @AfterEach + void stop() { + Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); + } + + @Test + void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); + } + + @Test + void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode()); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var helloOut = container3.execInContainer("/bin/sh", "-c",
"/root/dhfs_default/fuse/a.out"); + Log.info(helloOut); + return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); + }); + } + + @Test + void removeHostTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + + var c3curl = container3.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request DELETE " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + @Test + void dirConflictTest() throws IOException, InterruptedException, TimeoutException { + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + // Pauses needed as otherwise docker buffers some incoming packets + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container3.getContainerId()).exec(); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container3.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 
60, TimeUnit.SECONDS, 2); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + for (var c : List.of(container1, container2, container3)) { + var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) + return false; + } + return true; + }); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); + }); + } + + @Test + void fileConflictTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode()); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); + + var client = DockerClientFactory.instance().client(); + client.pauseContainerCmd(container1.getContainerId()).exec(); + client.pauseContainerCmd(container2.getContainerId()).exec(); + // Pauses needed as otherwise docker buffers some incoming packets + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container3.getContainerId()).exec(); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.pauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container1.getContainerId()).exec(); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); + client.unpauseContainerCmd(container2.getContainerId()).exec(); + client.unpauseContainerCmd(container3.getContainerId()).exec(); + Log.warn("Waiting for connections"); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + waitingConsumer3.waitUntil(frame -> 
frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); + Log.warn("Connected"); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + for (var c : List.of(container1, container2, container3)) { + var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); + var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); + Log.info(ls); + Log.info(cat); + if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) + return false; + } + return true; + }); + + await().atMost(45, TimeUnit.SECONDS).until(() -> { + return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && + container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( + container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); + }); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java new file mode 100644 index 00000000..5bec10e9 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java @@ -0,0 +1,93 @@ +package com.usatiuk.dhfs.integration; + +import io.quarkus.logging.Log; +import org.jetbrains.annotations.NotNull; +import org.testcontainers.images.builder.ImageFromDockerfile; + +import java.nio.file.Paths; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DhfsImage implements Future { + + private static String _builtImage = null; + private static DhfsImage INSTANCE = new DhfsImage(); + + private DhfsImage() {} + + public static DhfsImage getInstance() { + return INSTANCE; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public String get() throws InterruptedException, ExecutionException { + return buildImpl(); + } + + @Override + public String get(long timeout, @NotNull TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return buildImpl(); + } + + private synchronized String buildImpl() { + if (_builtImage != null) { + return _builtImage; + } + + Log.info("Building image"); + + String buildPath = System.getProperty("buildDirectory"); + String nativeLibsDirectory = System.getProperty("nativeLibsDirectory"); + Log.info("Build path: " + buildPath); + Log.info("Native libs path: " + nativeLibsDirectory); + + var image = new ImageFromDockerfile() + .withDockerfileFromBuilder(builder -> + builder + .from("azul/zulu-openjdk-debian:21-jre-headless-latest") + .run("apt update && apt install -y libfuse2 curl gcc") + .copy("/app", "/app") + .copy("/libs", "/libs") + .cmd("java", "-ea", "-Xmx128M", + "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED", + "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED", + "-Ddhfs.objects.peerdiscovery.interval=100", + "-Ddhfs.objects.invalidation.delay=100", + 
"-Ddhfs.objects.deletion.delay=0", + "-Ddhfs.objects.deletion.can-delete-retry-delay=1000", + "-Ddhfs.objects.ref_verification=true", + "-Ddhfs.objects.write_log=true", + "-Ddhfs.objects.sync.timeout=10", + "-Ddhfs.objects.sync.ping.timeout=5", + "-Ddhfs.objects.reconnect_interval=1s", + "-Dcom.usatiuk.dhfs.supportlib.native-path=/libs", + "-Dquarkus.log.category.\"com.usatiuk\".level=TRACE", + "-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE", + "-Ddhfs.objects.periodic-push-op-interval=5s", + "-jar", "/app/quarkus-run.jar") + .build()) + .withFileFromPath("/app", Paths.get(buildPath, "quarkus-app")) + .withFileFromPath("/libs", Paths.get(nativeLibsDirectory)); + + _builtImage = image.get(); + Log.info("Image built: " + _builtImage); + return _builtImage; + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java new file mode 100644 index 00000000..07a929e4 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java @@ -0,0 +1,135 @@ +package com.usatiuk.dhfs.integration; + +import com.github.dockerjava.api.model.Device; +import org.junit.jupiter.api.*; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class ResyncIT { + GenericContainer container1; + GenericContainer container2; + + WaitingConsumer waitingConsumer1; + WaitingConsumer waitingConsumer2; + + String c1uuid; + String c2uuid; + + @BeforeEach + void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { + Network network = Network.newNetwork(); + + container1 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + container2 = new GenericContainer<>(DhfsImage.getInstance()) + .withPrivilegedMode(true) + .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) + .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); + + Stream.of(container1, container2).parallel().forEach(GenericContainer::start); + + waitingConsumer1 = new WaitingConsumer(); + var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); + container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); + waitingConsumer2 = new WaitingConsumer(); + var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); + container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); + } + + @AfterEach + void stop() { + Stream.of(container1, 
container2).parallel().forEach(GenericContainer::stop); + } + + @Test + void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { + await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); + } + + + @Test + void manyFiles() throws IOException, InterruptedException, TimeoutException { + var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done"); + Assertions.assertEquals(0, ret.getExitCode()); + var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); + + ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done"); + Assertions.assertEquals(0, ret.getExitCode()); + foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); + + c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); + + Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); + Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + + var c1curl = container1.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c2uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + var c2curl = container2.execInContainer("/bin/sh", "-c", + "curl --header \"Content-Type: 
application/json\" " + + " --request PUT " + + " --data '{\"uuid\":\"" + c1uuid + "\"}' " + + " http://localhost:8080/objects-manage/known-peers"); + + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + return 400 == Integer.valueOf(foundWc2.getStdout().strip()); + }); + await().atMost(45, TimeUnit.SECONDS).until(() -> { + var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); + return 400 == Integer.valueOf(foundWc2.getStdout().strip()); + }); + } + +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java new file mode 100644 index 00000000..16e78c86 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java @@ -0,0 +1,95 @@ +package com.usatiuk.dhfs.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; +import com.usatiuk.dhfs.objects.repository.persistence.FileObjectPersistentStore; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import jakarta.inject.Inject; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; + + +class Profiles { + public static class FileObjectPersistentStoreTestProfile extends TempDataProfile { + @Override + protected void getConfigOverrides(Map ret) { + ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "TRACE"); + ret.put("dhfs.fuse.enabled", "false"); + ret.put("dhfs.objects.ref_verification", "true"); + } + } +} + +@QuarkusTest +@TestProfile(Profiles.FileObjectPersistentStoreTestProfile.class) +public class FileObjectPersistentStoreTest { + @Inject + FileObjectPersistentStore fileObjectPersistentStore; + + @Test + public void writeReadFullObject() { + String name = "writeReadFullObjectSmallMeta"; + + var bytes = new byte[100000]; + ThreadLocalRandom.current().nextBytes(bytes); + + ObjectMetadataP meta = ObjectMetadataP.newBuilder().setName("verycoolname123456789").build(); + JObjectDataP data = JObjectDataP.newBuilder().setChunkData(ChunkDataP.newBuilder().setData(ByteString.copyFrom(bytes)).build()).build(); + + fileObjectPersistentStore.writeObjectDirect(name, meta, data); + var readMeta = fileObjectPersistentStore.readObjectMeta(name); + var readData = fileObjectPersistentStore.readObject(name); + Assertions.assertEquals(meta, readMeta); + Assertions.assertEquals(data, readData); + + var bigString = RandomStringUtils.random(100000); + + var newMeta = ObjectMetadataP.newBuilder().setName(String.valueOf(bigString)).build(); + fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + readData = fileObjectPersistentStore.readObject(name); + Assertions.assertEquals(newMeta, readMeta); + 
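// the object data was untouched by the metadata-only rewrite, so it must still read back unchanged +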
Assertions.assertEquals(data, readData); + + fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + Assertions.assertEquals(newMeta, readMeta); + Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); + + fileObjectPersistentStore.writeObjectMetaDirect(name, meta); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + Assertions.assertEquals(meta, readMeta); + Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); + + fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + Assertions.assertEquals(newMeta, readMeta); + Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); + + fileObjectPersistentStore.writeObjectDirect(name, newMeta, data); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + readData = fileObjectPersistentStore.readObject(name); + Assertions.assertEquals(newMeta, readMeta); + Assertions.assertEquals(data, readData); + + fileObjectPersistentStore.writeObjectMetaDirect(name, meta); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + readData = fileObjectPersistentStore.readObject(name); + Assertions.assertEquals(meta, readMeta); + Assertions.assertEquals(data, readData); + + fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); + readMeta = fileObjectPersistentStore.readObjectMeta(name); + readData = fileObjectPersistentStore.readObject(name); + Assertions.assertEquals(newMeta, readMeta); + Assertions.assertEquals(data, readData); + } +} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java new file mode 100644 index 00000000..fd6f10e7 --- /dev/null +++ b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java @@ -0,0 +1,24 @@ +package com.usatiuk.dhfs.persistence; + +import io.quarkus.test.junit.QuarkusTest; + +@QuarkusTest +public class ProtoSerializationTest { + +// @Inject +// ProtoSerializerService protoSerializerService; +// +// @Test +// void SerializeDeserializePeerDirectory() { +// var pd = new PeerDirectory(); +// pd.getPeers().add(UUID.randomUUID()); +// var ser = JObjectDataP.newBuilder().setPeerDirectory((PeerDirectoryP) protoSerializerService.serialize(pd)).build(); +// var deser = (PeerDirectory) protoSerializerService.deserialize(ser); +// Assertions.assertIterableEquals(pd.getPeers(), deser.getPeers()); +// +// var ser2 = protoSerializerService.serializeToJObjectDataP(pd); +// var deser2 = (PeerDirectory) protoSerializerService.deserialize(ser2); +// Assertions.assertIterableEquals(pd.getPeers(), deser2.getPeers()); +// } +// +} diff --git a/dhfs-parent/server-old/src/test/resources/application.properties b/dhfs-parent/server-old/src/test/resources/application.properties new file mode 100644 index 00000000..64f51835 --- /dev/null +++ b/dhfs-parent/server-old/src/test/resources/application.properties @@ -0,0 +1,11 @@ +dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test +dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test +dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test +dhfs.objects.ref_verification=true +dhfs.objects.deletion.delay=0 +quarkus.log.category."com.usatiuk.dhfs".level=TRACE +quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE 
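+# Presumably needed because supportlib ships native helpers, and a JNI library can only be loaded by one classloader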
+quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib +quarkus.http.test-port=0 +quarkus.http.test-ssl-port=0 +dhfs.local-discovery=false \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 46f8e283..cd0b73b2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -1,90 +1,12 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.Leaf; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import net.openhft.hashing.LongTupleHashFunction; +import com.usatiuk.objects.common.runtime.JData; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; +import java.io.Serializable; -@AssumedUnique -@Leaf -public class ChunkData extends JObjectData { - final ChunkDataP _data; +public interface ChunkData extends JData, Serializable { + ByteString getData(); - public ChunkData(ByteString bytes) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - // TODO: There might be (most definitely) a copy there - .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer())) - .mapToObj(Long::toHexString).collect(Collectors.joining())) - .build(); - } - - public ChunkData(ByteString bytes, String name) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - .setName(name) - .build(); - } - - public ChunkData(ChunkDataP chunkDataP) { - super(); - _data = chunkDataP; - } - - ChunkDataP getData() { - return _data; - } - - public ByteString getBytes() { - return _data.getData(); - } - - public int getSize() { - return _data.getData().size(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ChunkData chunkData = (ChunkData) o; - return Objects.equals(getName(), chunkData.getName()); - } - - @Override - public int hashCode() { - return Objects.hashCode(getName()); - } - - @Override - public String getName() { - return _data.getName(); - } - - @Override - public Class getConflictResolver() { - return NoOpConflictResolver.class; - } - - @Override - public Collection extractRefs() { - return List.of(); - } - - @Override - public int estimateSize() { - return _data.getData().size(); - } + void setData(ByteString data); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 0c6fa4e8..ed5cd96c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,51 +1,19 @@ package com.usatiuk.dhfs.files.objects; -import com.usatiuk.dhfs.files.conflicts.FileConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import lombok.Getter; -import lombok.Setter; +import 
com.usatiuk.objects.common.runtime.JObjectKey;
 
-import java.util.*;
+import java.util.NavigableMap;
 
-public class File extends FsNode {
-    @Getter
-    private final NavigableMap<Long, String> _chunks;
-    @Getter
-    private final boolean _symlink;
-    @Getter
-    @Setter
-    private long _size = 0;
+public interface File extends FsNode {
+    NavigableMap<Long, JObjectKey> getChunks();
 
-    public File(UUID uuid, long mode, boolean symlink) {
-        super(uuid, mode);
-        _symlink = symlink;
-        _chunks = new TreeMap<>();
-    }
+    void setChunks(NavigableMap<Long, JObjectKey> chunks);
 
-    public File(UUID uuid, long mode, boolean symlink, NavigableMap<Long, String> chunks) {
-        super(uuid, mode);
-        _symlink = symlink;
-        _chunks = chunks;
-    }
+    boolean getSymlink();
 
-    @Override
-    public Class<? extends ConflictResolver> getConflictResolver() {
-        return FileConflictResolver.class;
-    }
+    void setSymlink(boolean symlink);
 
-    @Override
-    public Class<? extends JObjectData> getRefType() {
-        return ChunkData.class;
-    }
+    long getSize();
 
-    @Override
-    public Collection<String> extractRefs() {
-        return Collections.unmodifiableCollection(_chunks.values());
-    }
-
-    @Override
-    public int estimateSize() {
-        return _chunks.size() * 192;
-    }
+    void setSize(long size);
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java
index a6e6ac14..227c0775 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java
@@ -1,43 +1,19 @@
 package com.usatiuk.dhfs.files.objects;
 
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import lombok.Getter;
-import lombok.Setter;
+import com.usatiuk.objects.common.runtime.JData;
 
-import java.io.Serial;
-import java.util.UUID;
+import java.io.Serializable;
 
-public abstract class FsNode extends JObjectData {
-    @Serial
-    private static final long serialVersionUID = 1;
+public interface FsNode extends JData, Serializable {
+    long getMode();
 
-    @Getter
-    final UUID _uuid;
-    @Getter
-    @Setter
-    private long _mode;
-    @Getter
-    @Setter
-    private long _ctime;
-    @Getter
-    @Setter
-    private long _mtime;
+    void setMode(long mode);
 
-    protected FsNode(UUID uuid) {
-        this._uuid = uuid;
-        this._ctime = System.currentTimeMillis();
-        this._mtime = this._ctime;
-    }
+    long getCtime();
 
-    protected FsNode(UUID uuid, long mode) {
-        this._uuid = uuid;
-        this._mode = mode;
-        this._ctime = System.currentTimeMillis();
-        this._mtime = this._ctime;
-    }
+    void setCtime(long ctime);
 
-    @Override
-    public String getName() {
-        return _uuid.toString();
-    }
+    long getMtime();
+
+    void setMtime(long mtime);
 }
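Note: ChunkData, File and FsNode above are now plain JData interfaces rather than
JObjectData subclasses; concrete instances come out of an ObjectAllocator and only
become durable once handed to the current Transaction. A minimal sketch of the
intended call pattern, mirroring DhfsFileServiceImpl.create() further down in this
patch (objectAllocator and curTx are the injected beans shown there; imports elided):

    File f = objectAllocator.create(File.class, new JObjectKey(UUID.randomUUID().toString()));
    f.setMode(0644);                // FsNode setters replace the old Lombok-generated ones
    f.setMtime(System.currentTimeMillis());
    f.setCtime(f.getMtime());
    f.setSymlink(false);
    f.setChunks(new TreeMap<>());   // file offset -> JObjectKey of the backing ChunkData
    curTx.putObject(f);             // nothing is persisted unless the transaction sees it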
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java
index 58678dd2..04797d08 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java
@@ -3,49 +3,49 @@ package com.usatiuk.dhfs.files.service;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
 import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
+import com.usatiuk.objects.common.runtime.JObjectKey;
 import org.apache.commons.lang3.tuple.Pair;
 
 import java.util.Optional;
 
 public interface DhfsFileService {
-    Optional<String> open(String name);
+    Optional<JObjectKey> open(String name);
 
-    Optional<String> create(String name, long mode);
+    Optional<JObjectKey> create(String name, long mode);
 
-    Pair inoToParent(String ino);
+    Pair inoToParent(JObjectKey ino);
 
     void mkdir(String name, long mode);
 
-    Optional<GetattrRes> getattr(String name);
+    Optional<GetattrRes> getattr(JObjectKey name);
 
-    Boolean chmod(String name, long mode);
+    Boolean chmod(JObjectKey name, long mode);
 
     void unlink(String name);
 
     Boolean rename(String from, String to);
 
-    Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs);
+    Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs);
 
     Iterable<String> readDir(String name);
 
-    void updateFileSize(JObject<File> file);
+    void updateFileSize(File file);
 
-    Long size(String f);
+    Long size(JObjectKey f);
 
-    Optional<ByteString> read(String fileUuid, long offset, int length);
+    Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);
 
-    Long write(String fileUuid, long offset, ByteString data);
+    Long write(JObjectKey fileUuid, long offset, ByteString data);
 
-    default Long write(String fileUuid, long offset, byte[] data) {
+    default Long write(JObjectKey fileUuid, long offset, byte[] data) {
         return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
     }
 
-    Boolean truncate(String fileUuid, long length);
+    Boolean truncate(JObjectKey fileUuid, long length);
 
-    String readlink(String uuid);
+    String readlink(JObjectKey uuid);
 
-    ByteString readlinkBS(String uuid);
+    ByteString readlinkBS(JObjectKey uuid);
 
-    String symlink(String oldpath, String newpath);
+    JObjectKey symlink(String oldpath, String newpath);
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
index 33b30d85..f4c1155b 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
@@ -4,18 +4,17 @@ import com.google.protobuf.ByteString;
 import com.google.protobuf.UnsafeByteOperations;
 import com.usatiuk.dhfs.files.objects.ChunkData;
 import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.files.objects.FsNode;
+import com.usatiuk.dhfs.objects.TransactionManager;
 import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.jrepository.JMutator;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
+import com.usatiuk.objects.alloc.runtime.ObjectAllocator;
+import com.usatiuk.objects.common.runtime.JData;
+import com.usatiuk.objects.common.runtime.JObjectKey;
 import io.grpc.Status;
 import io.grpc.StatusRuntimeException;
 import io.quarkus.logging.Log;
@@ -35,9 +34,11 @@ import java.util.stream.StreamSupport;
 @ApplicationScoped
 public class DhfsFileServiceImpl implements DhfsFileService {
     @Inject
-    JObjectManager jObjectManager;
+    Transaction curTx;
     @Inject
-    JObjectTxManager jObjectTxManager;
+    TransactionManager jObjectTxManager;
+    @Inject
+    ObjectAllocator 
objectAllocator; @ConfigProperty(name = "dhfs.files.target_chunk_size") int targetChunkSize; @@ -66,73 +67,66 @@ public class DhfsFileServiceImpl implements DhfsFileService { @ConfigProperty(name = "dhfs.objects.write_log") boolean writeLogging; - @Inject - PersistentPeerDataService persistentPeerDataService; @Inject JKleppmannTreeManager jKleppmannTreeManager; - private JKleppmannTreeManager.JKleppmannTree _tree; + private JKleppmannTreeManager.JKleppmannTree getTree() { + return jKleppmannTreeManager.getTree(new JObjectKey("fs")); + } private ChunkData createChunk(ByteString bytes) { - if (useHashForChunks) { - return new ChunkData(bytes); - } else { - return new ChunkData(bytes, persistentPeerDataService.getUniqueId()); - } + var newChunk = objectAllocator.create(ChunkData.class, new JObjectKey(UUID.randomUUID().toString())); + newChunk.setData(bytes); + curTx.putObject(newChunk); + return newChunk; } void init(@Observes @Priority(500) StartupEvent event) { Log.info("Initializing file service"); - _tree = jKleppmannTreeManager.getTree("fs"); + getTree(); } - private JObject getDirEntry(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + private JKleppmannTreeNode getDirEntry(String name) { + var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return (JObject) ret; + var ret = curTx.getObject(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + return ret; } - private Optional> getDirEntryOpt(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); + private Optional getDirEntryOpt(String name) { + var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) return Optional.empty(); - var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return Optional.of((JObject) ret); + var ret = curTx.getObject(JKleppmannTreeNode.class, res); + return ret; } @Override - public Optional getattr(String uuid) { + public Optional getattr(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var ref = jObjectManager.get(uuid); - if (ref.isEmpty()) return Optional.empty(); - return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - GetattrRes ret; - if (d instanceof File f) { - ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? 
GetattrType.SYMLINK : GetattrType.FILE); - } else if (d instanceof JKleppmannTreeNode) { - ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); - } else { - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - } - return Optional.of(ret); - }); + var ref = curTx.getObject(JData.class, uuid).orElse(null); + if (ref == null) return Optional.empty(); + GetattrRes ret; + if (ref instanceof File f) { + ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.getSymlink() ? GetattrType.SYMLINK : GetattrType.FILE); + } else if (ref instanceof JKleppmannTreeNode) { + ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); + } else { + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.getKey())); + } + return Optional.of(ret); }); } @Override - public Optional open(String name) { + public Optional open(String name) { return jObjectTxManager.executeTx(() -> { try { var ret = getDirEntry(name); - return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno(); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName(); - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - })); + return switch (ret.getNode().getMeta()) { + case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno()); + case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.getKey()); + default -> Optional.empty(); + }; } catch (StatusRuntimeException e) { if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { return Optional.empty(); @@ -142,17 +136,13 @@ public class DhfsFileServiceImpl implements DhfsFileService { }); } - private void ensureDir(JObject entry) { - entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory")); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null; - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - }); + private void ensureDir(JKleppmannTreeNode entry) { + if (!(entry.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.getKey())); } @Override - public Optional create(String name, long mode) { + public Optional create(String name, long mode) { return jObjectTxManager.executeTx(() -> { Path path = Path.of(name); var parent = getDirEntry(path.getParent().toString()); @@ -163,27 +153,31 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(fuuid, mode, false); + File f = objectAllocator.create(File.class, new JObjectKey(fuuid.toString())); + f.setMode(mode); + f.setMtime(System.currentTimeMillis()); + f.setCtime(f.getMtime()); + f.setSymlink(false); + f.setChunks(new TreeMap<>()); + curTx.putObject(f); - var newNodeId = _tree.getNewNodeId(); - var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId)); try { - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); + 
getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaFile(fname, f.getKey()), getTree().getNewNodeId()); } catch (Exception e) { - fobj.getMeta().removeRef(newNodeId); +// fobj.getMeta().removeRef(newNodeId); throw e; } finally { - fobj.rwUnlock(); +// fobj.rwUnlock(); } - return Optional.of(f.getName()); + return Optional.of(f.getKey()); }); } //FIXME: Slow.. @Override - public Pair inoToParent(String ino) { + public Pair inoToParent(JObjectKey ino) { return jObjectTxManager.executeTx(() -> { - return _tree.findParent(w -> { + return getTree().findParent(w -> { if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) if (f.getFileIno().equals(ino)) return true; @@ -203,7 +197,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { Log.debug("Creating directory " + name); - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId()); + getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId()); }); } @@ -211,13 +205,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { public void unlink(String name) { jObjectTxManager.executeTx(() -> { var node = getDirEntryOpt(name).orElse(null); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) - if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException(); - return d.getNode().getMeta(); - }); - - _tree.trash(meta, node.getMeta().getName()); + if (node.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) { + if (!allowRecursiveDelete && !node.getNode().getChildren().isEmpty()) + throw new DirectoryNotEmptyException(); + } + getTree().trash(node.getNode().getMeta(), node.getKey()); }); } @@ -225,37 +217,31 @@ public class DhfsFileServiceImpl implements DhfsFileService { public Boolean rename(String from, String to) { return jObjectTxManager.executeTx(() -> { var node = getDirEntry(from); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta()); + JKleppmannTreeNodeMeta meta = node.getNode().getMeta(); var toPath = Path.of(to); var toDentry = getDirEntry(toPath.getParent().toString()); ensureDir(toDentry); - _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName()); - + getTree().move(toDentry.getKey(), meta.withName(toPath.getFileName().toString()), node.getKey()); return true; }); } @Override - public Boolean chmod(String uuid, long mode) { + public Boolean chmod(JObjectKey uuid, long mode) { return jObjectTxManager.executeTx(() -> { - var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); + var dent = curTx.getObject(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); - dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> { - if (d instanceof JKleppmannTreeNode) { - return null;//FIXME:? 
- } else if (d instanceof File f) { - bump.apply(); - f.setMtime(System.currentTimeMillis()); - f.setMode(mode); - } else { - throw new IllegalArgumentException(uuid + " is not a file"); - } - return null; - }); - - return true; + if (dent instanceof JKleppmannTreeNode) { + return true; + } else if (dent instanceof File f) { + f.setMode(mode); + f.setMtime(System.currentTimeMillis()); + return true; + } else { + throw new IllegalArgumentException(uuid + " is not a file"); + } }); } @@ -264,81 +250,73 @@ public class DhfsFileServiceImpl implements DhfsFileService { return jObjectTxManager.executeTx(() -> { var found = getDirEntry(name); - return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - return new ArrayList<>(d.getNode().getChildren().keySet()); - }); + if (!(found.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory md)) + throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + return found.getNode().getChildren().keySet(); }); } @Override - public Optional read(String fileUuid, long offset, int length) { + public Optional read(JObjectKey fileUuid, long offset, int length) { return jObjectTxManager.executeTx(() -> { if (length < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - var fileOpt = jObjectManager.get(fileUuid); - if (fileOpt.isEmpty()) { + var file = curTx.getObject(File.class, fileUuid).orElse(null); + if (file == null) { Log.error("File not found when trying to read: " + fileUuid); return Optional.empty(); } - var file = fileOpt.get(); try { - return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - var chunksAll = ((File) fileData).getChunks(); - if (chunksAll.isEmpty()) { - return Optional.of(ByteString.empty()); - } - var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); + var chunksAll = file.getChunks(); + if (chunksAll.isEmpty()) { + return Optional.of(ByteString.empty()); + } + var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); - if (chunksList.isEmpty()) { - return Optional.of(ByteString.empty()); - } + if (chunksList.isEmpty()) { + return Optional.of(ByteString.empty()); + } - var chunks = chunksList.iterator(); - ByteString buf = ByteString.empty(); + var chunks = chunksList.iterator(); + ByteString buf = ByteString.empty(); - long curPos = offset; - var chunk = chunks.next(); + long curPos = offset; + var chunk = chunks.next(); - while (curPos < offset + length) { - var chunkPos = chunk.getKey(); + while (curPos < offset + length) { + var chunkPos = chunk.getKey(); - long offInChunk = curPos - chunkPos; + long offInChunk = curPos - chunkPos; - long toReadInChunk = (offset + length) - curPos; + long toReadInChunk = (offset + length) - curPos; - var chunkBytes = readChunk(chunk.getValue()); + var chunkBytes = readChunk(chunk.getValue()); - long readableLen = chunkBytes.size() - offInChunk; + long readableLen = chunkBytes.size() - offInChunk; - var toReadReally = Math.min(readableLen, toReadInChunk); + var toReadReally = Math.min(readableLen, toReadInChunk); 
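+                    // Bytes to copy from this chunk now: capped by what remains of the request;
+                    // negative means the offset lies past this chunk's data, handled by the break below.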
- if (toReadReally < 0) break; + if (toReadReally < 0) break; - buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); + buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); - curPos += toReadReally; + curPos += toReadReally; - if (readableLen > toReadInChunk) - break; + if (readableLen > toReadInChunk) + break; - if (!chunks.hasNext()) break; + if (!chunks.hasNext()) break; - chunk = chunks.next(); - } + chunk = chunks.next(); + } - // FIXME: - return Optional.of(buf); - }); + // FIXME: + return Optional.of(buf); } catch (Exception e) { Log.error("Error reading file: " + fileUuid, e); return Optional.empty(); @@ -346,357 +324,291 @@ public class DhfsFileServiceImpl implements DhfsFileService { }); } - private ByteString readChunk(String uuid) { - var chunkRead = jObjectManager.get(uuid).orElse(null); + private ByteString readChunk(JObjectKey uuid) { + var chunkRead = curTx.getObject(ChunkData.class, uuid).orElse(null); if (chunkRead == null) { Log.error("Chunk requested not found: " + uuid); throw new StatusRuntimeException(Status.NOT_FOUND); } - return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof ChunkData cd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - return cd.getBytes(); - }); + return chunkRead.getData(); } - private int getChunkSize(String uuid) { + private int getChunkSize(JObjectKey uuid) { return readChunk(uuid).size(); } - private void cleanupChunks(File f, Collection uuids) { + private void cleanupChunks(File f, Collection uuids) { // FIXME: - var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet(); - for (var cuuid : uuids) { - try { - if (inFile.contains(cuuid)) continue; - jObjectManager.get(cuuid) - .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, - (m, d, b, v) -> { - m.removeRef(f.getName()); - return null; - })); - } catch (Exception e) { - Log.error("Error when cleaning chunk " + cuuid, e); - } - } +// var inFile = useHashForChunks ? 
new HashSet<>(f.getChunks().values()) : Collections.emptySet(); +// for (var cuuid : uuids) { +// try { +// if (inFile.contains(cuuid)) continue; +// jObjectManager.get(cuuid) +// .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, +// (m, d, b, v) -> { +// m.removeRef(f.getName()); +// return null; +// })); +// } catch (Exception e) { +// Log.error("Error when cleaning chunk " + cuuid, e); +// } +// } } @Override - public Long write(String fileUuid, long offset, ByteString data) { + public Long write(JObjectKey fileUuid, long offset, ByteString data) { return jObjectTxManager.executeTx(() -> { if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); // FIXME: - var file = (JObject) jObjectManager.get(fileUuid).orElse(null); + var file = curTx.getObject(File.class, fileUuid).orElse(null); if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); + Log.error("File not found when trying to write: " + fileUuid); return -1L; } - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - // FIXME: - if (!(file.getData() instanceof File)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - if (writeLogging) { - Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " " - + offset + " " + data.size()); - } - - if (size(fileUuid) < offset) - truncate(fileUuid, offset); - - // FIXME: Some kind of immutable interface? - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - var first = chunksAll.floorEntry(offset); - var last = chunksAll.lowerEntry(offset + data.size()); - NavigableMap removedChunks = new TreeMap<>(); - - long start = 0; - - NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); - NavigableMap afterLast = last != null ? 
chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); - - if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { - beforeFirst = chunksAll; - afterLast = Collections.emptyNavigableMap(); - first = null; - last = null; - start = offset; - } else if (!chunksAll.isEmpty()) { - var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); - removedChunks.putAll(between); - start = first.getKey(); - } - - ByteString pendingWrites = ByteString.empty(); - - if (first != null && first.getKey() < offset) { - var chunkBytes = readChunk(first.getValue()); - pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); - } - pendingWrites = pendingWrites.concat(data); - - if (last != null) { - var lchunkBytes = readChunk(last.getValue()); - if (last.getKey() + lchunkBytes.size() > offset + data.size()) { - var startInFile = offset + data.size(); - var startInChunk = startInFile - last.getKey(); - pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); - } - } - - int combinedSize = pendingWrites.size(); - - if (targetChunkSize > 0) { - if (combinedSize < (targetChunkSize * writeMergeThreshold)) { - boolean leftDone = false; - boolean rightDone = false; - while (!leftDone && !rightDone) { - if (beforeFirst.isEmpty()) leftDone = true; - if (!beforeFirst.isEmpty() || !leftDone) { - var takeLeft = beforeFirst.lastEntry(); - - var cuuid = takeLeft.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - leftDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - leftDone = true; - continue; - } - - // FIXME: (and test this) - beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); - start = takeLeft.getKey(); - pendingWrites = readChunk(cuuid).concat(pendingWrites); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); - } - if (afterLast.isEmpty()) rightDone = true; - if (!afterLast.isEmpty() && !rightDone) { - var takeRight = afterLast.firstEntry(); - - var cuuid = takeRight.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - rightDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - rightDone = true; - continue; - } - - // FIXME: (and test this) - afterLast = afterLast.tailMap(takeRight.getKey(), false); - pendingWrites = pendingWrites.concat(readChunk(cuuid)); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeRight.getKey(), takeRight.getValue()); - } - } - } - } - - NavigableMap newChunks = new TreeMap<>(); - - { - int cur = 0; - while (cur < combinedSize) { - int end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { - end = Math.min(cur + targetChunkSize, combinedSize); - } else { - end = combinedSize; - } - } - - var thisChunk = pendingWrites.substring(cur, end); - - ChunkData newChunkData = createChunk(thisChunk); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += thisChunk.size(); - cur = end; - } - } - - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); - - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); - } finally 
{ - file.rwUnlock(); + if (writeLogging) { + Log.info("Writing to file: " + file.getKey() + " size=" + size(fileUuid) + " " + + offset + " " + data.size()); } + if (size(fileUuid) < offset) + truncate(fileUuid, offset); + + // FIXME: Some kind of immutable interface? + var chunksAll = Collections.unmodifiableNavigableMap(file.getChunks()); + var first = chunksAll.floorEntry(offset); + var last = chunksAll.lowerEntry(offset + data.size()); + NavigableMap removedChunks = new TreeMap<>(); + + long start = 0; + + NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); + NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); + + if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { + beforeFirst = chunksAll; + afterLast = Collections.emptyNavigableMap(); + first = null; + last = null; + start = offset; + } else if (!chunksAll.isEmpty()) { + var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); + removedChunks.putAll(between); + start = first.getKey(); + } + + ByteString pendingWrites = ByteString.empty(); + + if (first != null && first.getKey() < offset) { + var chunkBytes = readChunk(first.getValue()); + pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); + } + pendingWrites = pendingWrites.concat(data); + + if (last != null) { + var lchunkBytes = readChunk(last.getValue()); + if (last.getKey() + lchunkBytes.size() > offset + data.size()) { + var startInFile = offset + data.size(); + var startInChunk = startInFile - last.getKey(); + pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); + } + } + + int combinedSize = pendingWrites.size(); + + if (targetChunkSize > 0) { + if (combinedSize < (targetChunkSize * writeMergeThreshold)) { + boolean leftDone = false; + boolean rightDone = false; + while (!leftDone && !rightDone) { + if (beforeFirst.isEmpty()) leftDone = true; + if (!beforeFirst.isEmpty() || !leftDone) { + var takeLeft = beforeFirst.lastEntry(); + + var cuuid = takeLeft.getValue(); + + if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { + leftDone = true; + continue; + } + + if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { + leftDone = true; + continue; + } + + // FIXME: (and test this) + beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); + start = takeLeft.getKey(); + pendingWrites = readChunk(cuuid).concat(pendingWrites); + combinedSize += getChunkSize(cuuid); + removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); + } + if (afterLast.isEmpty()) rightDone = true; + if (!afterLast.isEmpty() && !rightDone) { + var takeRight = afterLast.firstEntry(); + + var cuuid = takeRight.getValue(); + + if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { + rightDone = true; + continue; + } + + if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { + rightDone = true; + continue; + } + + // FIXME: (and test this) + afterLast = afterLast.tailMap(takeRight.getKey(), false); + pendingWrites = pendingWrites.concat(readChunk(cuuid)); + combinedSize += getChunkSize(cuuid); + removedChunks.put(takeRight.getKey(), takeRight.getValue()); + } + } + } + } + + NavigableMap newChunks = new TreeMap<>(); + + { + int cur = 0; + while (cur < combinedSize) { + int end; + + if (targetChunkSize <= 0) + end = combinedSize; + else { + if 
((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { + end = Math.min(cur + targetChunkSize, combinedSize); + } else { + end = combinedSize; + } + } + + var thisChunk = pendingWrites.substring(cur, end); + + ChunkData newChunkData = createChunk(thisChunk); + newChunks.put(start, newChunkData.getKey()); + + start += thisChunk.size(); + cur = end; + } + } + + file.setChunks(newChunks); + cleanupChunks(file, removedChunks.values()); + updateFileSize(file); + return (long) data.size(); }); } @Override - public Boolean truncate(String fileUuid, long length) { + public Boolean truncate(JObjectKey fileUuid, long length) { return jObjectTxManager.executeTx(() -> { if (length < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - var file = (JObject) jObjectManager.get(fileUuid).orElse(null); + var file = curTx.getObject(File.class, fileUuid).orElse(null); if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); + Log.error("File not found when trying to write: " + fileUuid); return false; } if (length == 0) { - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getChunks())); - var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks())); - - file.mutate(new JMutator<>() { - long oldMtime; - - @Override - public boolean mutate(File object) { - oldMtime = object.getMtime(); - object.getChunks().clear(); - return true; - } - - @Override - public void revert(File object) { - object.setMtime(oldMtime); - object.getChunks().putAll(oldChunks); - } - }); - cleanupChunks(file.getData(), oldChunks.values()); - updateFileSize((JObject) file); - } catch (Exception e) { - Log.error("Error writing file chunks: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); - } + file.setChunks(new TreeMap<>()); + file.setMtime(System.currentTimeMillis()); + cleanupChunks(file, oldChunks.values()); + updateFileSize(file); return true; } - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); + var curSize = size(fileUuid); + if (curSize == length) return true; - var curSize = size(fileUuid); - if (curSize == length) return true; + var chunksAll = Collections.unmodifiableNavigableMap(file.getChunks()); + NavigableMap removedChunks = new TreeMap<>(); + NavigableMap newChunks = new TreeMap<>(); - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - NavigableMap removedChunks = new TreeMap<>(); - NavigableMap newChunks = new TreeMap<>(); + if (curSize < length) { + long combinedSize = (length - curSize); - if (curSize < length) { - long combinedSize = (length - curSize); + long start = curSize; - long start = curSize; + // Hack + HashMap zeroCache = new HashMap<>(); - // Hack - HashMap zeroCache = new HashMap<>(); + { + long cur = 0; + while (cur < combinedSize) { + long end; - { - long cur = 0; - while (cur < combinedSize) { - long end; - - if (targetChunkSize <= 0) + if (targetChunkSize <= 0) + end = combinedSize; + else { + if ((combinedSize - cur) > (targetChunkSize * 1.5)) { + end = cur + targetChunkSize; + } else { end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * 1.5)) { - end = cur + targetChunkSize; - } else { - end = combinedSize; - } } - - if (!zeroCache.containsKey(end - cur)) - zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new 
byte[Math.toIntExact(end - cur)])); - - ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += newChunkData.getSize(); - cur = end; } + + if (!zeroCache.containsKey(end - cur)) + zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); + + ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); + newChunks.put(start, newChunkData.getKey()); + + start += newChunkData.getData().size(); + cur = end; } - } else { - var tail = chunksAll.lowerEntry(length); - var afterTail = chunksAll.tailMap(tail.getKey(), false); - - removedChunks.put(tail.getKey(), tail.getValue()); - removedChunks.putAll(afterTail); - - var tailBytes = readChunk(tail.getValue()); - var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); - - ChunkData newChunkData = createChunk(newChunk); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(tail.getKey(), newChunkData.getName()); } + } else { + var tail = chunksAll.lowerEntry(length); + var afterTail = chunksAll.tailMap(tail.getKey(), false); - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); + removedChunks.put(tail.getKey(), tail.getValue()); + removedChunks.putAll(afterTail); - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); - return true; - } catch (Exception e) { - Log.error("Error reading file: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); + var tailBytes = readChunk(tail.getValue()); + var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); + + ChunkData newChunkData = createChunk(newChunk); + newChunks.put(tail.getKey(), newChunkData.getKey()); } + + file.setChunks(newChunks); + cleanupChunks(file, removedChunks.values()); + updateFileSize(file); + return true; }); } @Override - public String readlink(String uuid) { + public String readlink(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { return readlinkBS(uuid).toStringUtf8(); }); } @Override - public ByteString readlinkBS(String uuid) { + public ByteString readlinkBS(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var fileOpt = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); - - return fileOpt.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - - if (!((File) fileData).isSymlink()) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid)); - - return read(uuid, 0, Math.toIntExact(size(uuid))).get(); - }); + var fileOpt = curTx.getObject(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); + return read(uuid, 0, Math.toIntExact(size(uuid))).get(); }); } @Override - public String symlink(String oldpath, String newpath) { + public JObjectKey symlink(String oldpath, String newpath) { return jObjectTxManager.executeTx(() -> { Path path = Path.of(newpath); var parent = getDirEntry(path.getParent().toString()); @@ -708,107 +620,55 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = 
UUID.randomUUID();
 
             Log.debug("Creating file " + fuuid);
 
-            File f = new File(fuuid, 0, true);
-            var newNodeId = _tree.getNewNodeId();
+            File f = objectAllocator.create(File.class, new JObjectKey(fuuid.toString()));
+            f.setSymlink(true);
+            f.setChunks(new TreeMap<>());
+            curTx.putObject(f);
 
             ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
-            f.getChunks().put(0L, newChunkData.getName());
+            f.getChunks().put(0L, newChunkData.getKey());
+            updateFileSize(f);
 
-            jObjectManager.put(newChunkData, Optional.of(f.getName()));
-            var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId));
-            try {
-                updateFileSize(newFile);
-            } finally {
-                newFile.rwUnlock();
-            }
-
-            _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
-            return f.getName();
+            getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaFile(fname, f.getKey()), getTree().getNewNodeId());
+            return f.getKey();
         });
     }
 
     @Override
-    public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) {
+    public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
         return jObjectTxManager.executeTx(() -> {
-            var file = jObjectManager.get(fileUuid).orElseThrow(
+            var file = curTx.getObject(File.class, fileUuid).orElseThrow(
                     () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
                             "File not found for setTimes: " + fileUuid))
             );
 
-            file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> {
-                if (fileData instanceof JKleppmannTreeNode) return null; // FIXME:
-                if (!(fileData instanceof FsNode fd))
-                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
-                bump.apply();
-                fd.setMtime(mtimeMs);
-                return null;
-            });
-
+            file.setMtime(mtimeMs);
             return true;
         });
     }
 
     @Override
-    public void updateFileSize(JObject<File> file) {
+    public void updateFileSize(File file) {
         jObjectTxManager.executeTx(() -> {
-            file.rwLockNoCopy();
-            try {
-                file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
-                if (!(file.getData() instanceof File fd))
-                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
+            long realSize = 0;
 
-                long realSize = 0;
+            var last = file.getChunks().lastEntry();
+            if (last != null) {
+                var lastSize = getChunkSize(last.getValue());
+                realSize = last.getKey() + lastSize;
+            }
 
-                var last = fd.getChunks().lastEntry();
-                if (last != null) {
-                    var lastSize = getChunkSize(last.getValue());
-                    realSize = last.getKey() + lastSize;
-                }
-
-                if (realSize != fd.getSize()) {
-                    long finalRealSize = realSize;
-                    file.mutate(new JMutator<File>() {
-                        long oldSize;
-
-                        @Override
-                        public boolean mutate(File object) {
-                            oldSize = object.getSize();
-                            object.setSize(finalRealSize);
-                            return true;
-                        }
-
-                        @Override
-                        public void revert(File object) {
-                            object.setSize(oldSize);
-                        }
-                    });
-                }
-            } catch (Exception e) {
-                Log.error("Error updating file size: " + file.getMeta().getName(), e);
-            } finally {
-                file.rwUnlock();
+            if (realSize != file.getSize()) {
+                file.setSize(realSize);
             }
         });
     }
 
     @Override
-    public Long size(String uuid) {
+    public Long size(JObjectKey uuid) {
         return jObjectTxManager.executeTx(() -> {
-            var read = jObjectManager.get(uuid)
+            var read = curTx.getObject(File.class, uuid)
                     .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
 
-            try {
-                return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> {
-                    if (!(fileData instanceof File fd))
-                        throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
-                    return fd.getSize();
-                });
-            } catch (Exception e) {
-                Log.error("Error reading file: " + uuid, 
e); - return -1L; - } + return read.getSize(); }); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java index 0fa8ee29..01644c3f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java @@ -5,7 +5,6 @@ import com.sun.security.auth.module.UnixSystem; import com.usatiuk.dhfs.files.service.DhfsFileService; import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException; import com.usatiuk.dhfs.files.service.GetattrRes; -import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.kleppmanntree.AlreadyExistsException; import io.grpc.Status; @@ -38,8 +37,6 @@ import static jnr.posix.FileStat.*; public class DhfsFuse extends FuseStubFS { private static final int blksize = 1048576; private static final int iosize = 1048576; - @Inject - ObjectPersistentStore persistentStore; // FIXME? @ConfigProperty(name = "dhfs.fuse.root") String root; @ConfigProperty(name = "dhfs.fuse.enabled") @@ -100,9 +97,10 @@ public class DhfsFuse extends FuseStubFS { try { stbuf.f_frsize.set(blksize); stbuf.f_bsize.set(blksize); - stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system - stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs - stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs + // FIXME: + stbuf.f_blocks.set(1024 * 1024 * 1024 / blksize); // total data blocks in file system + stbuf.f_bfree.set(1024 * 1024 * 1024 / blksize); // free blocks in fs + stbuf.f_bavail.set(1024 * 1024 * 1024 / blksize); // avail blocks in fs stbuf.f_files.set(1000); //FIXME: stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 2743bf48..93bfce46 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -1,75 +1,67 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.*; -import com.usatiuk.dhfs.utils.VoidFn; -import io.quarkus.logging.Log; +import 
com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; +import java.util.HashMap; +import java.util.List; +import java.util.TreeMap; +import java.util.UUID; import java.util.function.Function; @ApplicationScoped public class JKleppmannTreeManager { private static final String dataFileName = "trees"; - private final ConcurrentHashMap _trees = new ConcurrentHashMap<>(); @Inject JKleppmannTreePeerInterface jKleppmannTreePeerInterface; @Inject - OpSender opSender; + Transaction curTx; @Inject - OpObjectRegistry opObjectRegistry; + TransactionManager txManager; @Inject - JObjectManager jObjectManager; - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - JObjectTxManager jObjectTxManager; - @Inject - SoftJObjectFactory softJObjectFactory; + ObjectAllocator objectAllocator; @Inject JKleppmannTreePeerInterface peerInterface; - public JKleppmannTree getTree(String name) { - return _trees.computeIfAbsent(name, this::createTree); - } - - private JKleppmannTree createTree(String name) { - return jObjectTxManager.executeTx(() -> { - var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null); + public JKleppmannTree getTree(JObjectKey name) { + return txManager.executeTx(() -> { + var data = curTx.getObject(JKleppmannTreePersistentData.class, name).orElse(null); if (data == null) { - data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty()); + data = objectAllocator.create(JKleppmannTreePersistentData.class, name); + data.setClock(new AtomicClock(1L)); + data.setQueues(new HashMap<>()); + data.setLog(new TreeMap<>()); + data.setPeerTimestampLog(new HashMap<>()); + curTx.putObject(data); } - var tree = new JKleppmannTree(name); - opObjectRegistry.registerObject(tree); - return tree; + return new JKleppmannTree(data); +// opObjectRegistry.registerObject(tree); }); } - public class JKleppmannTree implements OpObject { - private final KleppmannTree _tree; + public class JKleppmannTree { + private final KleppmannTree _tree; - private final SoftJObject _persistentData; + private final JKleppmannTreePersistentData _data; private final JKleppmannTreeStorageInterface _storageInterface; private final JKleppmannTreeClock _clock; - private final String _treeName; + private final JObjectKey _treeName; - JKleppmannTree(String treeName) { - _treeName = treeName; - - _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName)); + JKleppmannTree(JKleppmannTreePersistentData data) { + _treeName = data.getKey(); + _data = data; _storageInterface = new JKleppmannTreeStorageInterface(); _clock = new JKleppmannTreeClock(); @@ -77,305 +69,275 @@ public class JKleppmannTreeManager { _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder()); } - public String traverse(List names) { + public JObjectKey traverse(List names) { return _tree.traverse(names); } - public String getNewNodeId() { + public JObjectKey getNewNodeId() { return _storageInterface.getNewNodeId(); } - public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) { + public void move(JObjectKey newParent, JKleppmannTreeNodeMeta newMeta, JObjectKey node) { _tree.move(newParent, newMeta, node); } - public void 
trash(JKleppmannTreeNodeMeta newMeta, String node) { - _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node); + public void trash(JKleppmannTreeNodeMeta newMeta, JObjectKey node) { + _tree.move(_storageInterface.getTrashId(), newMeta.withName(node.name()), node); } - @Override - public boolean hasPendingOpsForHost(UUID host) { - return _persistentData.get() - .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getQueues().containsKey(host) && - !d.getQueues().get(host).isEmpty() - ); - } +// @Override +// public boolean hasPendingOpsForHost(UUID host) { +// return _persistentData.get() +// .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, +// (m, d) -> d.getQueues().containsKey(host) && +// !d.getQueues().get(host).isEmpty() +// ); +// } +// +// @Override +// public List getPendingOpsForHost(UUID host, int limit) { +// return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// if (d.getQueues().containsKey(host)) { +// var queue = d.getQueues().get(host); +// ArrayList collected = new ArrayList<>(); +// +// for (var node : queue.entrySet()) { +// collected.add(new JKleppmannTreeOpWrapper(node.getValue())); +// if (collected.size() >= limit) break; +// } +// +// return collected; +// } +// return List.of(); +// }); +// } - @Override - public List getPendingOpsForHost(UUID host, int limit) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d.getQueues().containsKey(host)) { - var queue = d.getQueues().get(host); - ArrayList collected = new ArrayList<>(); +// @Override +// public String getId() { +// return _treeName; +// } - for (var node : queue.entrySet()) { - collected.add(new JKleppmannTreeOpWrapper(node.getValue())); - if (collected.size() >= limit) break; - } +// @Override +// public void commitOpForHost(UUID host, Op op) { +// if (!(op instanceof JKleppmannTreeOpWrapper jop)) +// throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); +// _persistentData.get().assertRwLock(); +// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// +// var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); +// if (!Objects.equals(jop.getOp(), got)) +// throw new IllegalArgumentException("Committed op push was not the oldest"); +// +// _persistentData.get().mutate(new JMutator() { +// @Override +// public boolean mutate(JKleppmannTreePersistentData object) { +// object.getQueues().get(host).pollFirstEntry(); +// return true; +// } +// +// @Override +// public void revert(JKleppmannTreePersistentData object) { +// object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); +// } +// }); +// +// } - return collected; - } - return List.of(); - }); - } +// @Override +// public void pushBootstrap(UUID host) { +// _tree.recordBoostrapFor(host); +// } - @Override - public String getId() { - return _treeName; - } - - @Override - public void commitOpForHost(UUID host, Op op) { - if (!(op instanceof JKleppmannTreeOpWrapper jop)) - throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - - var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); - if (!Objects.equals(jop.getOp(), got)) - throw new 
IllegalArgumentException("Committed op push was not the oldest"); - - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getQueues().get(host).pollFirstEntry(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); - } - }); - - } - - @Override - public void pushBootstrap(UUID host) { - _tree.recordBoostrapFor(host); - } - - public Pair findParent(Function predicate) { + public Pair findParent(Function predicate) { return _tree.findParent(predicate); } - @Override - public boolean acceptExternalOp(UUID from, Op op) { - if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { - return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); - } +// @Override +// public boolean acceptExternalOp(UUID from, Op op) { +// if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { +// return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); +// } +// +// if (!(op instanceof JKleppmannTreeOpWrapper jop)) +// throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); +// +// JObject fileRef; +// if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { +// var fino = f.getFileIno(); +// fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); +// } else { +// fileRef = null; +// } +// +// if (Log.isTraceEnabled()) +// Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); +// +// try { +// _tree.applyExternalOp(from, jop.getOp()); +// } catch (Exception e) { +// Log.error("Error applying external op", e); +// throw e; +// } finally { +// // FIXME: +// // Fixup the ref if it didn't really get applied +// +// if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) +// Log.error("Could not create child of pushed op: " + jop.getOp()); +// +// if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { +// if (fileRef != null) { +// var got = jObjectManager.get(jop.getOp().childId()).orElse(null); +// +// VoidFn remove = () -> { +// fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { +// m.removeRef(jop.getOp().childId()); +// }); +// }; +// +// if (got == null) { +// remove.apply(); +// } else { +// try { +// got.rLock(); +// try { +// got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno())) +// remove.apply(); +// } finally { +// got.rUnlock(); +// } +// } catch (DeletedObjectAccessException dex) { +// remove.apply(); +// } +// } +// } +// } +// } +// return true; +// } - if (!(op instanceof JKleppmannTreeOpWrapper jop)) - throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); +// @Override +// public Op getPeriodicPushOp() { +// return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp()); +// } - JObject fileRef; - if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { - var fino = f.getFileIno(); - fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); - } else { - fileRef = null; - } +// @Override +// public void 
addToTx() { +// // FIXME: a hack +// _persistentData.get().rwLockNoCopy(); +// _persistentData.get().rwUnlock(); +// } - if (Log.isTraceEnabled()) - Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); - - try { - _tree.applyExternalOp(from, jop.getOp()); - } catch (Exception e) { - Log.error("Error applying external op", e); - throw e; - } finally { - // FIXME: - // Fixup the ref if it didn't really get applied - - if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) - Log.error("Could not create child of pushed op: " + jop.getOp()); - - if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { - if (fileRef != null) { - var got = jObjectManager.get(jop.getOp().childId()).orElse(null); - - VoidFn remove = () -> { - fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - m.removeRef(jop.getOp().childId()); - }); - }; - - if (got == null) { - remove.apply(); - } else { - try { - got.rLock(); - try { - got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno())) - remove.apply(); - } finally { - got.rUnlock(); - } - } catch (DeletedObjectAccessException dex) { - remove.apply(); - } - } - } - } - } - return true; - } - - @Override - public Op getPeriodicPushOp() { - return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp()); - } - - @Override - public void addToTx() { - // FIXME: a hack - _persistentData.get().rwLockNoCopy(); - _persistentData.get().rwUnlock(); - } - - private class JOpRecorder implements OpRecorder { + private class JOpRecorder implements OpRecorder { @Override - public void recordOp(OpMove op) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.recordOp(hostUuds, op); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.removeOp(hostUuds, op); - } - }); - opSender.push(JKleppmannTree.this); + public void recordOp(OpMove op) { +// _persistentData.get().assertRwLock(); +// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); +// _persistentData.get().mutate(new JMutator() { +// @Override +// public boolean mutate(JKleppmannTreePersistentData object) { +// object.recordOp(hostUuds, op); +// return true; +// } +// +// @Override +// public void revert(JKleppmannTreePersistentData object) { +// object.removeOp(hostUuds, op); +// } +// }); +// opSender.push(JKleppmannTree.this); } @Override - public void recordOpForPeer(UUID peer, OpMove op) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.recordOp(peer, op); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.removeOp(peer, op); - } - }); - opSender.push(JKleppmannTree.this); + public void 
recordOpForPeer(UUID peer, OpMove op) { +// _persistentData.get().assertRwLock(); +// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// _persistentData.get().mutate(new JMutator() { +// @Override +// public boolean mutate(JKleppmannTreePersistentData object) { +// object.recordOp(peer, op); +// return true; +// } +// +// @Override +// public void revert(JKleppmannTreePersistentData object) { +// object.removeOp(peer, op); +// } +// }); +// opSender.push(JKleppmannTree.this); } } private class JKleppmannTreeClock implements Clock { @Override public Long getTimestamp() { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1; - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getClock().getTimestamp(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getClock().ungetTimestamp(); - } - }); - return ret; + return _data.getClock().getTimestamp(); } @Override public Long peekTimestamp() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp()); + return _data.getClock().peekTimestamp(); } @Override public Long updateTimestamp(Long receivedTimestamp) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - Long _old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - _old = object.getClock().updateTimestamp(receivedTimestamp); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getClock().setTimestamp(_old); - } - }); - return _persistentData.get().getData().getClock().peekTimestamp(); + return _data.getClock().updateTimestamp(receivedTimestamp); } } - public class JKleppmannTreeStorageInterface implements StorageInterface { + public class JKleppmannTreeStorageInterface implements StorageInterface { private final LogWrapper _logWrapper = new LogWrapper(); private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); public JKleppmannTreeStorageInterface() { - if (jObjectManager.get(getRootId()).isEmpty()) { - putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory("")))); - putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null))); + if (curTx.getObject(JKleppmannTreeNode.class, getRootId()).isEmpty()) { + var rootNode = objectAllocator.create(JKleppmannTreeNode.class, getRootId()); + rootNode.setNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))); + curTx.putObject(rootNode); + var trashNode = objectAllocator.create(JKleppmannTreeNode.class, getTrashId()); + trashNode.setNode(new TreeNode<>(getTrashId(), null, new JKleppmannTreeNodeMetaDirectory(""))); + curTx.putObject(trashNode); } } - public JObject putNode(JKleppmannTreeNode node) { - return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent())); - } - - public JObject putNodeLocked(JKleppmannTreeNode node) { - return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent())); + @Override + public JObjectKey getRootId() { + return new JObjectKey(_treeName + "_jt_root"); } @Override - public String getRootId() { - return _treeName + 
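
The clock shim above now delegates straight to the AtomicClock held in the tree's persistent data, dropping the mutate/revert ceremony. As a mental model, the calls map onto a plain Lamport clock; the sketch below is an assumed illustration, not the real com.usatiuk.kleppmanntree.AtomicClock. One subtlety worth double-checking: the deleted mutator captured the return of updateTimestamp() as the value to restore on revert, which hints the underlying method may return the old stamp rather than the merged one, so the new direct return could change what callers observe.

    // Hedged sketch of the Lamport-clock semantics assumed above; method
    // names mirror the diff, the bodies are illustrative only.
    class LamportClockSketch {
        private long _now;

        LamportClockSketch(long start) {
            _now = start;
        }

        synchronized long getTimestamp() {  // hand out a fresh, strictly larger stamp
            return ++_now;
        }

        synchronized long peekTimestamp() { // observe without advancing
            return _now;
        }

        synchronized long updateTimestamp(long received) { // merge a remote stamp
            _now = Math.max(_now, received) + 1;
            return _now; // assumption -- see the caveat above
        }
    }
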
"_jt_root"; + public JObjectKey getTrashId() { + return new JObjectKey(_treeName + "_jt_trash"); } @Override - public String getTrashId() { - return _treeName + "_jt_trash"; + public JObjectKey getNewNodeId() { + return new JObjectKey(UUID.randomUUID().toString()); } @Override - public String getNewNodeId() { - return persistentPeerDataService.getUniqueId(); - } - - @Override - public JKleppmannTreeNodeWrapper getById(String id) { - var got = jObjectManager.get(id); + public JKleppmannTreeNodeWrapper getById(JObjectKey id) { + var got = curTx.getObject(JKleppmannTreeNode.class, id); if (got.isEmpty()) return null; - return new JKleppmannTreeNodeWrapper((JObject) got.get()); + return new JKleppmannTreeNodeWrapper(got.get()); } @Override - public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { - return new JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node))); + public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { + var created = objectAllocator.create(JKleppmannTreeNode.class, node.getId()); + created.setNode(node); + curTx.putObject(created); + return new JKleppmannTreeNodeWrapper(created); } @Override - public void removeNode(String id) {} + public void removeNode(JObjectKey id) { + // TODO + } @Override - public LogInterface getLog() { + public LogInterface getLog() { return _logWrapper; } @@ -386,179 +348,93 @@ public class JKleppmannTreeManager { @Override public void rLock() { - _persistentData.get().rLock(); } @Override public void rUnlock() { - _persistentData.get().rUnlock(); } @Override public void rwLock() { - _persistentData.get().rwLockNoCopy(); } @Override public void rwUnlock() { - _persistentData.get().rwUnlock(); } @Override public void assertRwLock() { - _persistentData.get().assertRwLock(); } private class PeerLogWrapper implements PeerTimestampLogInterface { - @Override public Long getForPeer(UUID peerId) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeerTimestampLog().get(peerId)); + return _data.getPeerTimestampLog().get(peerId); } @Override public void putForPeer(UUID peerId, Long timestamp) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - Long old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - old = object.getPeerTimestampLog().put(peerId, timestamp); - return !Objects.equals(old, timestamp); - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - if (old != null) - object.getPeerTimestampLog().put(peerId, old); - else - object.getPeerTimestampLog().remove(peerId, timestamp); - } - }); + _data.getPeerTimestampLog().put(peerId, timestamp); } } - private class LogWrapper implements LogInterface { + private class LogWrapper implements LogInterface { @Override - public Pair, LogRecord> peekOldest() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var ret = d.getLog().firstEntry(); - if (ret == null) return null; - return Pair.of(ret); - }); - } - - @Override - public Pair, LogRecord> takeOldest() { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - - var ret = _persistentData.get().getData().getLog().firstEntry(); - if (ret != null) - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - 
object.getLog().pollFirstEntry(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getLog().put(ret.getKey(), ret.getValue()); - } - }); + public Pair, LogRecord> peekOldest() { + var ret = _data.getLog().firstEntry(); + if (ret == null) return null; return Pair.of(ret); } @Override - public Pair, LogRecord> peekNewest() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var ret = d.getLog().lastEntry(); - if (ret == null) return null; - return Pair.of(ret); - }); + public Pair, LogRecord> takeOldest() { + var ret = _data.getLog().pollFirstEntry(); + if (ret == null) return null; + return Pair.of(ret); } @Override - public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var tail = d.getLog().tailMap(since, inclusive); - return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - }); + public Pair, LogRecord> peekNewest() { + var ret = _data.getLog().lastEntry(); + if (ret == null) return null; + return Pair.of(ret); } @Override - public List, LogRecord>> getAll() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - }); + public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { + return _data.getLog().tailMap(since, inclusive).entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); + } + + @Override + public List, LogRecord>> getAll() { + return _data.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); } @Override public boolean isEmpty() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().isEmpty(); - }); + return _data.getLog().isEmpty(); } @Override public boolean containsKey(CombinedTimestamp timestamp) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().containsKey(timestamp); - }); + return _data.getLog().containsKey(timestamp); } @Override public long size() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return (long) d.getLog().size(); - }); + return (long) _data.getLog().size(); } @Override - public void put(CombinedTimestamp timestamp, LogRecord record) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (_persistentData.get().getData().getLog().containsKey(timestamp)) + public void put(CombinedTimestamp timestamp, LogRecord record) { + if (_data.getLog().containsKey(timestamp)) throw new IllegalStateException("Overwriting log entry?"); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getLog().put(timestamp, record); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getLog().remove(timestamp, record); - } - }); + _data.getLog().put(timestamp, record); } @Override - public void replace(CombinedTimestamp timestamp, LogRecord record) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new 
JMutator() { - LogRecord old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - old = object.getLog().put(timestamp, record); - return !Objects.equals(old, record); - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - if (old != null) - object.getLog().put(timestamp, old); - else - object.getLog().remove(timestamp, record); - } - }); + public void replace(CombinedTimestamp timestamp, LogRecord record) { + _data.getLog().put(timestamp, record); } } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java index cd4b09c9..7f060fc0 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java @@ -2,70 +2,60 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; import com.usatiuk.kleppmanntree.TreeNode; import com.usatiuk.kleppmanntree.TreeNodeWrapper; +import com.usatiuk.objects.common.runtime.JObjectKey; import java.util.UUID; -public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper { - private final JObject _backing; +public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper { + private final JKleppmannTreeNode _backing; - public JKleppmannTreeNodeWrapper(JObject backing) {_backing = backing;} + public JKleppmannTreeNodeWrapper(JKleppmannTreeNode backing) { + assert backing != null; + assert backing.getNode() != null; + _backing = backing; + } @Override public void rLock() { - _backing.rLock(); } @Override public void rUnlock() { - _backing.rUnlock(); } @Override public void rwLock() { - _backing.rwLock(); } @Override public void rwUnlock() { - _backing.bumpVer(); // FIXME:? 
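
With the persistent data held directly, the LogWrapper above reduces to plain NavigableMap calls on a TreeMap keyed by timestamp. A minimal, runnable illustration of the same operations, with key and value types simplified to stand-ins:

    import java.util.TreeMap;

    // peekOldest/takeOldest/peekNewest/newestSlice from the diff map 1:1
    // onto TreeMap's navigation methods.
    public class LogSketch {
        public static void main(String[] args) {
            TreeMap<Long, String> log = new TreeMap<>();
            log.put(1L, "op@1");
            log.put(2L, "op@2");

            System.out.println(log.firstEntry());      // peekOldest
            System.out.println(log.pollFirstEntry());  // takeOldest: read and remove
            System.out.println(log.lastEntry());       // peekNewest
            System.out.println(log.tailMap(1L, true)); // newestSlice(since, inclusive)
        }
    }
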
- _backing.rwUnlock(); } @Override public void freeze() { - _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { - m.freeze(); - return null; - }); } @Override public void unfreeze() { - _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { - m.unfreeze(); - return null; - }); } @Override - public void notifyRef(String id) { - _backing.getMeta().addRef(id); + public void notifyRef(JObjectKey id) { } @Override - public void notifyRmRef(String id) { - _backing.getMeta().removeRef(id); + public void notifyRmRef(JObjectKey id) { } @Override - public TreeNode getNode() { - _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (_backing.getData() == null) - throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!"); - return _backing.getData().getNode(); + public TreeNode getNode() { + // TODO: + return _backing.getNode(); +// _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// if (_backing.getData() == null) +// throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!"); +// return _backing.getData().getNode(); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index 4612f8fc..52749fb1 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -1,30 +1,27 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; import com.usatiuk.kleppmanntree.OpMove; +import com.usatiuk.objects.common.runtime.JObjectKey; import lombok.Getter; -import java.util.Collection; -import java.util.List; import java.util.UUID; // Wrapper to avoid having to specify generic types -public class JKleppmannTreeOpWrapper implements Op { +public class JKleppmannTreeOpWrapper { @Getter - private final OpMove _op; + private final OpMove _op; - public JKleppmannTreeOpWrapper(OpMove op) { + public JKleppmannTreeOpWrapper(OpMove op) { if (op == null) throw new IllegalArgumentException("op shouldn't be null"); _op = op; } - @Override - public Collection getEscapedRefs() { - if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { - return List.of(mf.getFileIno()); - } - return List.of(); - } +// @Override +// public Collection getEscapedRefs() { +// if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { +// return List.of(mf.getFileIno()); +// } +// return List.of(); +// } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java index 39b5d484..9088ecfd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java @@ -1,25 +1,21 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import 
com.usatiuk.kleppmanntree.PeerInterface; -import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.util.Collection; +import java.util.List; import java.util.UUID; @Singleton public class JKleppmannTreePeerInterface implements PeerInterface { - @Inject - PersistentPeerDataService persistentPeerDataService; - @Override public UUID getSelfId() { - return persistentPeerDataService.getSelfUuid(); + return UUID.nameUUIDFromBytes("1".getBytes()); } @Override public Collection getAllPeers() { - return persistentPeerDataService.getHostUuidsAndSelf(); + return List.of(getSelfId()); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java index 3c84d067..5259c51b 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -1,13 +1,12 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; import lombok.Getter; import java.util.Collection; import java.util.List; import java.util.UUID; -public class JKleppmannTreePeriodicPushOp implements Op { +public class JKleppmannTreePeriodicPushOp { @Getter private final UUID _from; @Getter @@ -18,8 +17,8 @@ public class JKleppmannTreePeriodicPushOp implements Op { _timestamp = timestamp; } - @Override - public Collection getEscapedRefs() { - return List.of(); - } +// @Override +// public Collection getEscapedRefs() { +// return List.of(); +// } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 0146da88..1f8d365a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -1,45 +1,15 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; import com.usatiuk.kleppmanntree.TreeNode; -import lombok.Getter; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; -import java.util.Collection; -import java.util.Collections; -import java.util.List; +import java.io.Serializable; import java.util.UUID; // FIXME: Ideally this is two classes? 
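
The peer interface above is cut down to a single-node stub: instead of asking PersistentPeerDataService, it derives its identity from a fixed name-based UUID. UUID.nameUUIDFromBytes() produces a version-3 (MD5, name-based) UUID, so the id is stable across restarts and JVMs; a tiny runnable check:

    import java.util.UUID;

    public class SelfIdDemo {
        public static void main(String[] args) {
            UUID a = UUID.nameUUIDFromBytes("1".getBytes());
            UUID b = UUID.nameUUIDFromBytes("1".getBytes());
            System.out.println(a);           // same value on every run
            System.out.println(a.equals(b)); // true
            System.out.println(a.version()); // 3
        }
    }
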
-@OnlyLocal -public class JKleppmannTreeNode extends JObjectData { - @Getter - final TreeNode _node; +public interface JKleppmannTreeNode extends JData, Serializable { + TreeNode getNode(); - public JKleppmannTreeNode(TreeNode node) { - _node = node; - } - - @Override - public String getName() { - return _node.getId(); - } - - @Override - public Class getConflictResolver() { - return null; - } - - @Override - public Collection extractRefs() { - if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile) - return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno()); - return Collections.unmodifiableCollection(_node.getChildren().values()); - } - - @Override - public Class getRefType() { - return JObjectData.class; - } + void setNode(TreeNode node); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index 124cd51d..4e0e77ee 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; +import com.usatiuk.objects.common.runtime.JObjectKey; import lombok.Getter; import java.util.Objects; @@ -9,9 +10,9 @@ import java.util.Objects; @ProtoMirror(JKleppmannTreeNodeMetaFileP.class) public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { @Getter - private final String _fileIno; + private final JObjectKey _fileIno; - public JKleppmannTreeNodeMetaFile(String name, String fileIno) { + public JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) { super(name); _fileIno = fileIno; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index d6881d5b..f4bc34ce 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -1,88 +1,53 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; import com.usatiuk.kleppmanntree.AtomicClock; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; -import lombok.Getter; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; -import java.util.*; +import java.util.Collection; +import java.util.HashMap; +import java.util.TreeMap; +import java.util.UUID; -@OnlyLocal -public class JKleppmannTreePersistentData extends JObjectData { - private final String _treeName; - @Getter - private final AtomicClock _clock; - @Getter - private final HashMap, OpMove>> _queues; - @Getter - private final HashMap _peerTimestampLog; - @Getter - private final TreeMap, LogRecord> _log; +public interface 
JKleppmannTreePersistentData extends JData { + AtomicClock getClock(); - public JKleppmannTreePersistentData(String treeName, AtomicClock clock, - HashMap, OpMove>> queues, - HashMap peerTimestampLog, TreeMap, LogRecord> log) { - _treeName = treeName; - _clock = clock; - _queues = queues; - _peerTimestampLog = peerTimestampLog; - _log = log; + void setClock(AtomicClock clock); + + HashMap, OpMove>> getQueues(); + + void setQueues(HashMap, OpMove>> queues); + + HashMap getPeerTimestampLog(); + + void setPeerTimestampLog(HashMap peerTimestampLog); + + TreeMap, LogRecord> getLog(); + + void setLog(TreeMap, LogRecord> log); + + default void recordOp(UUID host, OpMove opMove) { + getQueues().computeIfAbsent(host, h -> new TreeMap<>()); + getQueues().get(host).put(opMove.timestamp(), opMove); } - public JKleppmannTreePersistentData(String treeName) { - _treeName = treeName; - _clock = new AtomicClock(1); - _queues = new HashMap<>(); - _peerTimestampLog = new HashMap<>(); - _log = new TreeMap<>(); + default void removeOp(UUID host, OpMove opMove) { + getQueues().get(host).remove(opMove.timestamp(), opMove); } - public static String nameFromTreeName(String treeName) { - return treeName + "_pd"; - } - - public void recordOp(UUID host, OpMove opMove) { - _queues.computeIfAbsent(host, h -> new TreeMap<>()); - _queues.get(host).put(opMove.timestamp(), opMove); - } - - public void removeOp(UUID host, OpMove opMove) { - _queues.get(host).remove(opMove.timestamp(), opMove); - } - - public void recordOp(Collection hosts, OpMove opMove) { + default void recordOp(Collection hosts, OpMove opMove) { for (var u : hosts) { recordOp(u, opMove); } } - public void removeOp(Collection hosts, OpMove opMove) { + default void removeOp(Collection hosts, OpMove opMove) { for (var u : hosts) { removeOp(u, opMove); } } - - @Override - public String getName() { - return nameFromTreeName(_treeName); - } - - public String getTreeName() { - return _treeName; - } - - @Override - public Class getConflictResolver() { - return null; - } - - @Override - public Collection extractRefs() { - return List.of(); - } } From 9273dc818e80957c5b9e69089e3bde0e00a297b6 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 29 Dec 2024 11:46:29 +0100 Subject: [PATCH 019/105] basic object deletion --- .../alloc/test/ObjectsAllocDevModeTest.java | 2 ++ .../objects/alloc/test/ObjectsAllocTest.java | 2 ++ .../dhfs/objects/CurrentTransaction.java | 5 +++ .../usatiuk/dhfs/objects/JObjectManager.java | 32 +++++++++++-------- .../dhfs/objects/transaction/Transaction.java | 2 ++ .../transaction/TransactionFactoryImpl.java | 5 +++ .../dhfs/objects/transaction/TxRecord.java | 7 ++++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 31 ++++++++++++++++++ 8 files changed, 72 insertions(+), 14 deletions(-) diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java index 526a143c..9db98d28 100644 --- a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java +++ b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java @@ -3,11 +3,13 @@ package com.usatiuk.objects.alloc.test; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import 
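
The persistent-data refactor above is the recurring pattern of this series: persisted state is declared as a get/set interface extending JData, shared logic moves into default methods (recordOp/removeOp), and ObjectAllocator.create() supplies the backing implementation at runtime. The pattern in miniature, with purely illustrative names that are not from the repo:

    // Illustrative only. State is an interface; an allocator-generated (or
    // hand-written) bean provides the fields behind the accessors.
    interface CounterData /* extends JData */ {
        long getValue();

        void setValue(long v);

        // Like recordOp()/removeOp() above, behaviour that only needs the
        // accessors can live in default methods on the interface itself.
        default void increment() {
            setValue(getValue() + 1);
        }
    }
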
org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; import io.quarkus.test.QuarkusDevModeTest; +@Disabled public class ObjectsAllocDevModeTest { // Start hot reload (DevMode) test with your extension loaded diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java index 637d504f..3c856f81 100644 --- a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java +++ b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java @@ -6,9 +6,11 @@ import jakarta.inject.Inject; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; +@Disabled public class ObjectsAllocTest { // Start unit test with your extension loaded diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index b986b4e4..cc411cfa 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -24,6 +24,11 @@ public class CurrentTransaction implements Transaction { return transactionManager.current().getObject(type, key, strategy); } + @Override + public void deleteObject(JObjectKey key) { + transactionManager.current().deleteObject(key); + } + @Override public void putObject(JData obj) { transactionManager.current().putObject(obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 674454f5..a296f0f4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -196,7 +196,8 @@ public class JObjectManager { var toFlush = new LinkedList>(); var toPut = new LinkedList>(); - var toLock = new ArrayList>(); + var toDelete = new LinkedList(); + var toLock = new ArrayList(); var dependencies = new LinkedList>(); Log.trace("Committing transaction " + tx.getId()); @@ -214,12 +215,16 @@ public class JObjectManager { toFlush.add(copy); } case TxRecord.TxObjectRecordOptimistic copy -> { - toLock.add(copy.original()); + toLock.add(copy.original().data().getKey()); toFlush.add(copy); } case TxRecord.TxObjectRecordNew created -> { toPut.add(created); } + case TxRecord.TxObjectRecordDeleted deleted -> { + toLock.add(deleted.key()); + toDelete.add(deleted.key()); + } default -> throw new IllegalStateException("Unexpected value: " + entry); } } @@ -231,7 +236,7 @@ public class JObjectManager { // TODO: Check this } case ReadTrackingObjectSource.TxReadObjectSome(var obj) -> { - toLock.add(obj); + toLock.add(obj.data().getKey()); dependencies.add(obj); } default -> throw new IllegalStateException("Unexpected value: " + entry); @@ -240,28 +245,23 @@ public class JObjectManager { toLock.sort(Comparator.comparingInt(System::identityHashCode)); - for (var record : toLock) { - Log.trace("Locking " + record.toString()); + for (var key : toLock) { + Log.trace("Locking " + 
key.toString()); - var got = getLocked(record.data().getClass(), record.data().getKey(), true); + var got = getLocked(JData.class, key, true); if (got == null) { - throw new IllegalStateException("Object " + record.data().getKey() + " not found"); + throw new IllegalStateException("Object " + key + " not found"); } toUnlock.add(got.wrapper().lock.writeLock()::unlock); - - if (got.obj() != record.data()) { - throw new IllegalStateException("Object changed during transaction: " + got.obj() + " vs " + record.data()); - } } for (var dep : dependencies) { Log.trace("Checking dependency " + dep.toString()); var current = _objects.get(dep.data().getKey()).get(); - // Checked above - assert current == dep.data(); + if (current == null) continue; // FIXME? Does this matter much for deletion if (current.getVersion() >= tx.getId()) { throw new IllegalStateException("Serialization hazard: " + current.getVersion() + " vs " + tx.getId()); @@ -312,7 +312,11 @@ public class JObjectManager { Log.tracef("Committing transaction %d to storage", tx.getId()); - objectStorage.commitTx(new SimpleTxManifest(written.stream().map(JData::getKey).toList(), Collections.emptyList())); + objectStorage.commitTx(new SimpleTxManifest(written.stream().map(JData::getKey).toList(), toDelete)); + + for (var del : toDelete) { + _objects.remove(del); + } } catch (Throwable t) { Log.error("Error when committing transaction", t); throw t; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 6e7f2228..6325ef97 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -13,6 +13,8 @@ public interface Transaction { void putObject(JData obj); + void deleteObject(JObjectKey key); + default Optional getObject(Class type, JObjectKey key) { return getObject(type, key, LockingStrategy.OPTIMISTIC); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 3b35db50..14db6281 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -65,6 +65,11 @@ public class TransactionFactoryImpl implements TransactionFactory { } } + @Override + public void deleteObject(JObjectKey key) { + _objects.put(key, new TxRecord.TxObjectRecordDeleted(key)); + } + @Override public void putObject(JData obj) { if (_objects.containsKey(obj.getKey())) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 818ab340..510ff800 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -31,6 +31,13 @@ public class TxRecord { } } + public record TxObjectRecordDeleted(JObjectKey key) implements TxObjectRecord { + @Override + public JData getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { + return null; + } + } + public record TxObjectRecordCopyLock(TransactionObject original, ChangeTrackingJData 
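
Deletion rides the existing commit machinery above: a TxObjectRecordDeleted is just another write record, the key is locked like any other, and the storage layer receives the deletions alongside the written keys in a single manifest. Assuming the manifest is nothing more than those two key lists, as the SimpleTxManifest constructor call in the diff suggests, its shape would be roughly:

    import java.util.List;

    // Assumed stand-in for the manifest handed to
    // ObjectPersistentStore.commitTx(): keys to (re)write and keys to
    // delete, applied together. JObjectKey is the repo's key type.
    record SimpleTxManifest(List<JObjectKey> written, List<JObjectKey> deleted) { }

After commitTx() returns, the in-memory map entries for the deleted keys are dropped, so later reads fall through to storage and find nothing.
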
copy) implements TxObjectRecordWrite { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index ccacd341..fc27c382 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -46,6 +46,37 @@ public class ObjectsTest { } } + @Test + void createDeleteObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); + newParent.setLastName("John"); + curTx.putObject(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + txm.commit(); + } + + { + txm.begin(); + curTx.deleteObject(new JObjectKey("Parent")); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + Assertions.assertNull(parent); + txm.commit(); + } + } + @Test void createCreateObject() { { From 097929260b1b679dc57fc3f29946701742363f04 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 29 Dec 2024 11:47:21 +0100 Subject: [PATCH 020/105] simplify transaction method names --- .../dhfs/objects/CurrentTransaction.java | 12 +++--- .../dhfs/objects/transaction/Transaction.java | 10 ++--- .../transaction/TransactionFactoryImpl.java | 6 +-- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 40 +++++++++---------- .../files/service/DhfsFileServiceImpl.java | 26 ++++++------ .../jkleppmanntree/JKleppmannTreeManager.java | 14 +++---- 6 files changed, 54 insertions(+), 54 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index cc411cfa..552b45e8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -20,17 +20,17 @@ public class CurrentTransaction implements Transaction { } @Override - public Optional getObject(Class type, JObjectKey key, LockingStrategy strategy) { - return transactionManager.current().getObject(type, key, strategy); + public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { + return transactionManager.current().get(type, key, strategy); } @Override - public void deleteObject(JObjectKey key) { - transactionManager.current().deleteObject(key); + public void delete(JObjectKey key) { + transactionManager.current().delete(key); } @Override - public void putObject(JData obj) { - transactionManager.current().putObject(obj); + public void put(JData obj) { + transactionManager.current().put(obj); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 6325ef97..dc6325ec 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -9,13 +9,13 @@ import java.util.Optional; public interface Transaction { long getId(); - Optional getObject(Class type, JObjectKey key, LockingStrategy strategy); + Optional get(Class type, JObjectKey key, LockingStrategy strategy); - void putObject(JData obj); + void 
put(JData obj); - void deleteObject(JObjectKey key); + void delete(JObjectKey key); - default Optional getObject(Class type, JObjectKey key) { - return getObject(type, key, LockingStrategy.OPTIMISTIC); + default Optional get(Class type, JObjectKey key) { + return get(type, key, LockingStrategy.OPTIMISTIC); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 14db6281..04eb7438 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -28,7 +28,7 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public Optional getObject(Class type, JObjectKey key, LockingStrategy strategy) { + public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { var got = _objects.get(key); if (got != null) { var compatible = got.getIfStrategyCompatible(key, strategy); @@ -66,12 +66,12 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public void deleteObject(JObjectKey key) { + public void delete(JObjectKey key) { _objects.put(key, new TxRecord.TxObjectRecordDeleted(key)); } @Override - public void putObject(JData obj) { + public void put(JData obj) { if (_objects.containsKey(obj.getKey())) { throw new IllegalArgumentException("Object already exists in transaction"); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index fc27c382..d7262c13 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -34,13 +34,13 @@ public class ObjectsTest { txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); newParent.setLastName("John"); - curTx.putObject(newParent); + curTx.put(newParent); txm.commit(); } { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } @@ -52,26 +52,26 @@ public class ObjectsTest { txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); newParent.setLastName("John"); - curTx.putObject(newParent); + curTx.put(newParent); txm.commit(); } { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } { txm.begin(); - curTx.deleteObject(new JObjectKey("Parent")); + curTx.delete(new JObjectKey("Parent")); txm.commit(); } { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); Assertions.assertNull(parent); txm.commit(); } @@ -83,17 +83,17 @@ public class ObjectsTest { txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); newParent.setLastName("John"); - curTx.putObject(newParent); + curTx.put(newParent); txm.commit(); } Assertions.assertThrows(Exception.class, () -> txm.run(() -> { var 
newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); newParent.setLastName("John2"); - curTx.putObject(newParent); + curTx.put(newParent); })); { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent7")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } @@ -105,13 +105,13 @@ public class ObjectsTest { txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent3")); newParent.setLastName("John"); - curTx.putObject(newParent); + curTx.put(newParent); txm.commit(); } { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); Assertions.assertEquals("John", parent.getLastName()); parent.setLastName("John2"); txm.commit(); @@ -119,7 +119,7 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); Assertions.assertEquals("John2", parent.getLastName()); parent.setLastName("John3"); txm.commit(); @@ -127,7 +127,7 @@ public class ObjectsTest { { txm.begin(); - var parent = curTx.getObject(Parent.class, new JObjectKey("Parent3")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null); Assertions.assertEquals("John3", parent.getLastName()); txm.commit(); } @@ -148,7 +148,7 @@ public class ObjectsTest { barrier.await(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); newParent.setLastName("John"); - curTx.putObject(newParent); + curTx.put(newParent); Log.warn("Thread 1 commit"); txm.commit(); thread1Failed.set(false); @@ -164,7 +164,7 @@ public class ObjectsTest { barrier.await(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); newParent.setLastName("John2"); - curTx.putObject(newParent); + curTx.put(newParent); Log.warn("Thread 2 commit"); txm.commit(); thread2Failed.set(false); @@ -177,7 +177,7 @@ public class ObjectsTest { latch.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new JObjectKey("Parent2")).orElse(null); + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); txm.commit(); if (!thread1Failed.get()) { @@ -198,7 +198,7 @@ public class ObjectsTest { txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey(key)); newParent.setLastName("John3"); - curTx.putObject(newParent); + curTx.put(newParent); txm.commit(); } @@ -213,7 +213,7 @@ public class ObjectsTest { Log.warn("Thread 1"); txm.begin(); barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey(key), strategy).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); parent.setLastName("John"); Log.warn("Thread 1 commit"); txm.commit(); @@ -228,7 +228,7 @@ public class ObjectsTest { Log.warn("Thread 2"); txm.begin(); barrier.await(); - var parent = curTx.getObject(Parent.class, new JObjectKey(key), strategy).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); parent.setLastName("John2"); Log.warn("Thread 2 commit"); txm.commit(); @@ -242,7 +242,7 @@ public class ObjectsTest { latchEnd.await(); txm.begin(); - var got = curTx.getObject(Parent.class, new 
JObjectKey(key)).orElse(null); + var got = curTx.get(Parent.class, new JObjectKey(key)).orElse(null); txm.commit(); if (!thread1Failed.get()) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index f4c1155b..33ccc5e6 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -77,7 +77,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { private ChunkData createChunk(ByteString bytes) { var newChunk = objectAllocator.create(ChunkData.class, new JObjectKey(UUID.randomUUID().toString())); newChunk.setData(bytes); - curTx.putObject(newChunk); + curTx.put(newChunk); return newChunk; } @@ -89,21 +89,21 @@ public class DhfsFileServiceImpl implements DhfsFileService { private JKleppmannTreeNode getDirEntry(String name) { var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - var ret = curTx.getObject(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); + var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); return ret; } private Optional getDirEntryOpt(String name) { var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); if (res == null) return Optional.empty(); - var ret = curTx.getObject(JKleppmannTreeNode.class, res); + var ret = curTx.get(JKleppmannTreeNode.class, res); return ret; } @Override public Optional getattr(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var ref = curTx.getObject(JData.class, uuid).orElse(null); + var ref = curTx.get(JData.class, uuid).orElse(null); if (ref == null) return Optional.empty(); GetattrRes ret; if (ref instanceof File f) { @@ -159,7 +159,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { f.setCtime(f.getMtime()); f.setSymlink(false); f.setChunks(new TreeMap<>()); - curTx.putObject(f); + curTx.put(f); try { getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaFile(fname, f.getKey()), getTree().getNewNodeId()); @@ -231,7 +231,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public Boolean chmod(JObjectKey uuid, long mode) { return jObjectTxManager.executeTx(() -> { - var dent = curTx.getObject(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); + var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); if (dent instanceof JKleppmannTreeNode) { return true; @@ -265,7 +265,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - var file = curTx.getObject(File.class, fileUuid).orElse(null); + var file = curTx.get(File.class, fileUuid).orElse(null); if (file == null) { Log.error("File not found when trying to read: " + fileUuid); return Optional.empty(); @@ -325,7 +325,7 @@ public class DhfsFileServiceImpl 
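
A note on what the two-thread harness above is checking, since the bookkeeping is easy to misread: both transactions are forced to overlap at the barrier, then race to commit the same key. Under LockingStrategy.WRITE the second writer blocks on the record lock and both commits can land in sequence; under OPTIMISTIC both read the same version, and the later committer is expected to trip the commit-time version check from the JObjectManager hunk earlier in the series:

    // From the commit path: a dependency whose version moved past this
    // transaction's id means another tx won the race.
    if (current.getVersion() >= tx.getId()) {
        throw new IllegalStateException(
                "Serialization hazard: " + current.getVersion() + " vs " + tx.getId());
    }

Either way, the final assertions appear to accept either winner, keyed off which thread reported success.
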
implements DhfsFileService { } private ByteString readChunk(JObjectKey uuid) { - var chunkRead = curTx.getObject(ChunkData.class, uuid).orElse(null); + var chunkRead = curTx.get(ChunkData.class, uuid).orElse(null); if (chunkRead == null) { Log.error("Chunk requested not found: " + uuid); @@ -364,7 +364,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); // FIXME: - var file = curTx.getObject(File.class, fileUuid).orElse(null); + var file = curTx.get(File.class, fileUuid).orElse(null); if (file == null) { Log.error("File not found when trying to write: " + fileUuid); return -1L; @@ -515,7 +515,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (length < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - var file = curTx.getObject(File.class, fileUuid).orElse(null); + var file = curTx.get(File.class, fileUuid).orElse(null); if (file == null) { Log.error("File not found when trying to write: " + fileUuid); return false; @@ -602,7 +602,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public ByteString readlinkBS(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var fileOpt = curTx.getObject(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); + var fileOpt = curTx.get(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); return read(uuid, 0, Math.toIntExact(size(uuid))).get(); }); } @@ -635,7 +635,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) { return jObjectTxManager.executeTx(() -> { - var file = curTx.getObject(File.class, fileUuid).orElseThrow( + var file = curTx.get(File.class, fileUuid).orElseThrow( () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription( "File not found for setTimes: " + fileUuid)) ); @@ -665,7 +665,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public Long size(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var read = curTx.getObject(File.class, uuid) + var read = curTx.get(File.class, uuid) .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); return read.getSize(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 93bfce46..cd8b1cbd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -35,14 +35,14 @@ public class JKleppmannTreeManager { public JKleppmannTree getTree(JObjectKey name) { return txManager.executeTx(() -> { - var data = curTx.getObject(JKleppmannTreePersistentData.class, name).orElse(null); + var data = curTx.get(JKleppmannTreePersistentData.class, name).orElse(null); if (data == null) { data = objectAllocator.create(JKleppmannTreePersistentData.class, name); data.setClock(new AtomicClock(1L)); data.setQueues(new HashMap<>()); data.setLog(new TreeMap<>()); data.setPeerTimestampLog(new 
HashMap<>()); - curTx.putObject(data); + curTx.put(data); } return new JKleppmannTree(data); // opObjectRegistry.registerObject(tree); @@ -291,13 +291,13 @@ public class JKleppmannTreeManager { private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); public JKleppmannTreeStorageInterface() { - if (curTx.getObject(JKleppmannTreeNode.class, getRootId()).isEmpty()) { + if (curTx.get(JKleppmannTreeNode.class, getRootId()).isEmpty()) { var rootNode = objectAllocator.create(JKleppmannTreeNode.class, getRootId()); rootNode.setNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))); - curTx.putObject(rootNode); + curTx.put(rootNode); var trashNode = objectAllocator.create(JKleppmannTreeNode.class, getTrashId()); trashNode.setNode(new TreeNode<>(getTrashId(), null, new JKleppmannTreeNodeMetaDirectory(""))); - curTx.putObject(trashNode); + curTx.put(trashNode); } } @@ -318,7 +318,7 @@ public class JKleppmannTreeManager { @Override public JKleppmannTreeNodeWrapper getById(JObjectKey id) { - var got = curTx.getObject(JKleppmannTreeNode.class, id); + var got = curTx.get(JKleppmannTreeNode.class, id); if (got.isEmpty()) return null; return new JKleppmannTreeNodeWrapper(got.get()); } @@ -327,7 +327,7 @@ public class JKleppmannTreeManager { public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { var created = objectAllocator.create(JKleppmannTreeNode.class, node.getId()); created.setNode(node); - curTx.putObject(created); + curTx.put(created); return new JKleppmannTreeNodeWrapper(created); } From 62fbaa206a75e4f7e20cf7eb7703c7928fe2eb61 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 29 Dec 2024 19:27:18 +0100 Subject: [PATCH 021/105] basic pre-tx hook --- dhfs-parent/objects/pom.xml | 10 +- .../usatiuk/dhfs/objects/JObjectManager.java | 130 +++++++++++------- .../usatiuk/dhfs/objects/PreCommitTxHook.java | 15 ++ .../dhfs/objects/TransactionManagerImpl.java | 3 +- .../transaction/TransactionFactoryImpl.java | 41 +++++- .../transaction/TransactionPrivate.java | 4 +- .../dhfs/objects/transaction/TxRecord.java | 25 ++-- .../dhfs/objects/PreCommitTxHookTest.java | 116 ++++++++++++++++ 8 files changed, 274 insertions(+), 70 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index e20ad13e..4d97539c 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -80,11 +80,11 @@ objects-common 1.0-SNAPSHOT - - - - - + + io.quarkus + quarkus-junit5-mockito + test + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index a296f0f4..1b8eab72 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -11,6 +11,7 @@ import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; import java.io.Serializable; @@ -36,6 +37,8 @@ public class JObjectManager { ObjectAllocator objectAllocator; @Inject TransactionFactory transactionFactory; + @Inject + Instance preCommitTxHooks; private final 
DataLocker _storageReadLocker = new DataLocker(); private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); @@ -191,77 +194,108 @@ public class JObjectManager { } public void commit(TransactionPrivate tx) { + Log.trace("Committing transaction " + tx.getId()); + // This also holds the weak references var toUnlock = new LinkedList(); var toFlush = new LinkedList>(); var toPut = new LinkedList>(); var toDelete = new LinkedList(); - var toLock = new ArrayList(); var dependencies = new LinkedList>(); - Log.trace("Committing transaction " + tx.getId()); - // For existing objects: // Check that their version is not higher than the version of transaction being committed // TODO: check deletions, inserts try { - for (var entry : tx.writes()) { - Log.trace("Processing write " + entry.toString()); - switch (entry) { - case TxRecord.TxObjectRecordCopyLock copy -> { - toUnlock.add(copy.original().lock().writeLock()::unlock); - toFlush.add(copy); - } - case TxRecord.TxObjectRecordOptimistic copy -> { - toLock.add(copy.original().data().getKey()); - toFlush.add(copy); - } - case TxRecord.TxObjectRecordNew created -> { - toPut.add(created); - } - case TxRecord.TxObjectRecordDeleted deleted -> { - toLock.add(deleted.key()); - toDelete.add(deleted.key()); - } - default -> throw new IllegalStateException("Unexpected value: " + entry); - } - } + Collection> drained; + while (!(drained = tx.drainWrites()).isEmpty()) { + Log.trace("Commit iteration with " + drained.size() + " records"); + var toLock = new ArrayList(); - for (var entry : tx.reads().entrySet()) { - Log.trace("Processing read " + entry.toString()); - switch (entry.getValue()) { - case ReadTrackingObjectSource.TxReadObjectNone none -> { - // TODO: Check this + for (var entry : drained) { + Log.trace("Processing write " + entry.toString()); + switch (entry) { + case TxRecord.TxObjectRecordCopyLock copy -> { + toUnlock.add(copy.original().lock().writeLock()::unlock); + toFlush.add(copy); + } + case TxRecord.TxObjectRecordOptimistic copy -> { + toLock.add(copy.original().data().getKey()); + toFlush.add(copy); + } + case TxRecord.TxObjectRecordNew created -> { + toPut.add(created); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + toLock.add(deleted.getKey()); + toDelete.add(deleted.getKey()); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); } - case ReadTrackingObjectSource.TxReadObjectSome(var obj) -> { - toLock.add(obj.data().getKey()); - dependencies.add(obj); - } - default -> throw new IllegalStateException("Unexpected value: " + entry); - } - } - - toLock.sort(Comparator.comparingInt(System::identityHashCode)); - - for (var key : toLock) { - Log.trace("Locking " + key.toString()); - - var got = getLocked(JData.class, key, true); - - if (got == null) { - throw new IllegalStateException("Object " + key + " not found"); } - toUnlock.add(got.wrapper().lock.writeLock()::unlock); + for (var entry : tx.drainReads().entrySet()) { + Log.trace("Processing read " + entry.toString()); + switch (entry.getValue()) { + case ReadTrackingObjectSource.TxReadObjectNone none -> { + // TODO: Check this + } + case ReadTrackingObjectSource.TxReadObjectSome(var obj) -> { + toLock.add(obj.data().getKey()); + dependencies.add(obj); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); + } + } + + toLock.sort(Comparator.comparingInt(System::identityHashCode)); + + for (var key : toLock) { + Log.trace("Locking " + key.toString()); + + var got = getLocked(JData.class, key, true); + + if (got == 
null) { + throw new IllegalStateException("Object " + key + " not found"); + } + + toUnlock.add(got.wrapper().lock.writeLock()::unlock); + } + + for (var hook : preCommitTxHooks) { + for (var entry : drained) { + Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); + switch (entry) { + case TxRecord.TxObjectRecordCopyLock copy -> { + hook.onChange(copy.getKey(), copy.original().data(), copy.copy().wrapped()); + } + case TxRecord.TxObjectRecordOptimistic copy -> { + hook.onChange(copy.getKey(), copy.original().data(), copy.copy().wrapped()); + } + case TxRecord.TxObjectRecordNew created -> { + hook.onCreate(created.getKey(), created.created()); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + hook.onDelete(deleted.getKey(), deleted.original().data()); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); + } + } + } } for (var dep : dependencies) { Log.trace("Checking dependency " + dep.toString()); var current = _objects.get(dep.data().getKey()).get(); - if (current == null) continue; // FIXME? Does this matter much for deletion + // Check that the object we have locked is really the one in the map + // Note that current can be null, not only if it doesn't exist, but + // also for example in the case when it was changed and then garbage collected + if (dep.data() != current) { + throw new IllegalStateException("Serialization hazard: " + dep.data() + " vs " + current); + } if (current.getVersion() >= tx.getId()) { throw new IllegalStateException("Serialization hazard: " + current.getVersion() + " vs " + tx.getId()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java new file mode 100644 index 00000000..0319ceac --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java @@ -0,0 +1,15 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; + +public interface PreCommitTxHook { + default void onChange(JObjectKey key, JData old, JData cur) { + } + + default void onCreate(JObjectKey key, JData cur) { + } + + default void onDelete(JObjectKey key, JData cur) { + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index f08159a4..b71228f9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -45,7 +45,8 @@ public class TransactionManagerImpl implements TransactionManager { @Override public void rollback() { var tx = _currentTransaction.get(); - for (var o : tx.writes()) { + // Works only before commit was called + for (var o : tx.drainWrites()) { switch (o) { case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); default -> { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 04eb7438..ae7b05ee 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ 
-8,7 +8,10 @@ import jakarta.inject.Inject; import lombok.AccessLevel; import lombok.Getter; -import java.util.*; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { @@ -20,7 +23,7 @@ public class TransactionFactoryImpl implements TransactionFactory { private final long _id; private final ReadTrackingObjectSource _source; - private final Map> _objects = new HashMap<>(); + private Map> _objects = new HashMap<>(); private TransactionImpl(long id, TransactionObjectSource source) { _id = id; @@ -67,7 +70,31 @@ public class TransactionFactoryImpl implements TransactionFactory { @Override public void delete(JObjectKey key) { - _objects.put(key, new TxRecord.TxObjectRecordDeleted(key)); + // FIXME + var got = _objects.get(key); + if (got != null) { + switch (got) { + case TxRecord.TxObjectRecordNew created -> { + _objects.remove(key); + } + case TxRecord.TxObjectRecordCopyLock copyLockRecord -> { + _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord.original())); + } + case TxRecord.TxObjectRecordOptimistic optimisticRecord -> { + _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord.original())); + } + case TxRecord.TxObjectRecordDeleted deletedRecord -> { + return; + } + default -> throw new IllegalStateException("Unexpected value: " + got); + } + } + + var read = _source.get(JData.class, key).orElse(null); + if (read == null) { + return; + } + _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); } @Override @@ -80,12 +107,14 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public Collection> writes() { - return Collections.unmodifiableCollection(_objects.values()); + public Collection> drainWrites() { + var ret = _objects; + _objects = new HashMap<>(); + return ret.values(); } @Override - public Map> reads() { + public Map> drainReads() { return _source.getRead(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 18a5f488..233d6cd4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -7,7 +7,7 @@ import java.util.Map; // The transaction interface actually used by user code to retrieve objects public interface TransactionPrivate extends Transaction { - Collection> writes(); + Collection> drainWrites(); - Map> reads(); + Map> drainReads(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 510ff800..516fbd2b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -7,19 +7,18 @@ import com.usatiuk.objects.common.runtime.JObjectKey; public class TxRecord { public interface TxObjectRecord { T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy); - } - public record TxObjectRecordMissing(JObjectKey key) implements TxObjectRecord { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - return null; - } + JObjectKey getKey(); } public 
interface TxObjectRecordWrite extends TxObjectRecord { TransactionObject original(); ChangeTrackingJData copy(); + + default JObjectKey getKey() { + return original().data().getKey(); + } } public record TxObjectRecordNew(T created) implements TxObjectRecord { @@ -29,13 +28,23 @@ public class TxRecord { return created; return null; } + + @Override + public JObjectKey getKey() { + return created.getKey(); + } } - public record TxObjectRecordDeleted(JObjectKey key) implements TxObjectRecord { + public record TxObjectRecordDeleted(TransactionObject original) implements TxObjectRecord { @Override - public JData getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { + public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { return null; } + + @Override + public JObjectKey getKey() { + return original.data().getKey(); + } } public record TxObjectRecordCopyLock(TransactionObject original,
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java
new file mode 100644
index 00000000..07325b76
--- /dev/null
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java
@@ -0,0 +1,116 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.mockito.InjectSpy; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +@QuarkusTest +public class PreCommitTxHookTest { + @Inject + TransactionManager txm; + + @Inject + Transaction curTx; + + @Inject + ObjectAllocator alloc; + + @ApplicationScoped + public static class DummyPreCommitTxHook implements PreCommitTxHook { + } + + @InjectSpy + private DummyPreCommitTxHook spyHook; + + @Test + void createObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("ParentCreate")); + newParent.setLastName("John"); + curTx.put(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + txm.commit(); + } + + ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).getLastName()); + Assertions.assertEquals(new JObjectKey("ParentCreate"), keyCaptor.getValue()); + } + + @Test + void deleteObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("ParentDel")); + newParent.setLastName("John"); + curTx.put(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + txm.commit(); + } + + { + txm.begin(); + curTx.delete(new JObjectKey("ParentDel")); + txm.commit(); + } + + ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); +
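+        // For illustration (hypothetical bean, not part of this patch): a real
+        // hook is wired in exactly like DummyPreCommitTxHook above, e.g.:
+        //
+        //   @ApplicationScoped
+        //   public static class AuditTxHook implements PreCommitTxHook {
+        //       @Override
+        //       public void onCreate(JObjectKey key, JData cur) {
+        //           Log.infov("create {0} -> {1}", key, cur);
+        //       }
+        //
+        //       @Override
+        //       public void onChange(JObjectKey key, JData old, JData cur) {
+        //           Log.infov("change {0}: {1} -> {2}", key, old, cur);
+        //       }
+        //
+        //       @Override
+        //       public void onDelete(JObjectKey key, JData cur) {
+        //           Log.infov("delete {0} (last value {1})", key, cur);
+        //       }
+        //   }
+        //
+        // (assumes io.quarkus.logging.Log; JObjectManager discovers all such
+        // beans via CDI and invokes them while committing, with the same
+        // arguments that the captors in these tests verify)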
ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onDelete(keyCaptor.capture(), dataCaptor.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).getLastName()); + Assertions.assertEquals(new JObjectKey("ParentDel"), keyCaptor.getValue()); + } + + @Test + void editObject() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit")); + newParent.setLastName("John"); + curTx.put(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + parent.setLastName("John changed"); + txm.commit(); + } + + ArgumentCaptor dataCaptorOld = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).getLastName()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).getLastName()); + Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue()); + } + +} From 5d159ffde1db36befd04d5349791aecd5f9c6904 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 29 Dec 2024 21:27:08 +0100 Subject: [PATCH 022/105] possibly working refcounting --- .../usatiuk/dhfs/objects/JObjectManager.java | 4 +- .../dhfs/objects/TransactionManagerImpl.java | 2 +- .../transaction/TransactionFactoryImpl.java | 16 +++-- .../transaction/TransactionPrivate.java | 4 +- .../usatiuk/dhfs/files/objects/ChunkData.java | 3 +- .../com/usatiuk/dhfs/files/objects/File.java | 6 ++ .../usatiuk/dhfs/files/objects/FsNode.java | 3 +- .../files/service/DhfsFileServiceImpl.java | 3 + .../usatiuk/dhfs/objects/JDataRefcounted.java | 17 +++++ .../dhfs/objects/RefcounterTxHook.java | 63 +++++++++++++++++++ .../jkleppmanntree/JKleppmannTreeManager.java | 7 ++- .../structs/JKleppmannTreeNode.java | 17 ++++- .../structs/JKleppmannTreePersistentData.java | 13 ++-- 13 files changed, 137 insertions(+), 21 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 1b8eab72..e846596d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -210,7 +210,7 @@ public class JObjectManager { try { Collection> drained; - while (!(drained = tx.drainWrites()).isEmpty()) { + while (!(drained = tx.drainNewWrites()).isEmpty()) { Log.trace("Commit iteration with " + drained.size() + " records"); var toLock = new ArrayList(); @@ -236,7 +236,7 @@ public class JObjectManager { } } - for (var entry : tx.drainReads().entrySet()) { + for (var entry : tx.reads().entrySet()) { Log.trace("Processing read " + entry.toString()); switch (entry.getValue()) { case ReadTrackingObjectSource.TxReadObjectNone none -> { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index b71228f9..9e8d9433 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -46,7 +46,7 @@ public class TransactionManagerImpl implements TransactionManager { public void rollback() { var tx = _currentTransaction.get(); // Works only before commit was called - for (var o : tx.drainWrites()) { + for (var o : tx.drainNewWrites()) { switch (o) { case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); default -> { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index ae7b05ee..9368ca0f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -24,6 +24,7 @@ public class TransactionFactoryImpl implements TransactionFactory { private final ReadTrackingObjectSource _source; private Map> _objects = new HashMap<>(); + private Map> _newObjects = new HashMap<>(); private TransactionImpl(long id, TransactionObjectSource source) { _id = id; @@ -52,6 +53,7 @@ public class TransactionFactoryImpl implements TransactionFactory { } var copy = objectAllocator.copy(read.data()); _objects.put(key, new TxRecord.TxObjectRecordOptimistic<>(read, copy)); + _newObjects.put(key, new TxRecord.TxObjectRecordOptimistic<>(read, copy)); return Optional.of(copy.wrapped()); } case WRITE: { @@ -61,6 +63,7 @@ public class TransactionFactoryImpl implements TransactionFactory { } var copy = objectAllocator.copy(locked.data()); _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); + _newObjects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); return Optional.of(copy.wrapped()); } default: @@ -76,12 +79,15 @@ public class TransactionFactoryImpl implements TransactionFactory { switch (got) { case TxRecord.TxObjectRecordNew created -> { _objects.remove(key); + _newObjects.remove(key); } case TxRecord.TxObjectRecordCopyLock copyLockRecord -> { _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord.original())); + _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord.original())); } case TxRecord.TxObjectRecordOptimistic optimisticRecord -> { _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord.original())); + _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord.original())); } case TxRecord.TxObjectRecordDeleted deletedRecord -> { return; @@ -95,6 +101,7 @@ public class TransactionFactoryImpl implements TransactionFactory { return; } _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); + _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); } @Override @@ -104,17 +111,18 @@ public class TransactionFactoryImpl implements TransactionFactory { } _objects.put(obj.getKey(), new TxRecord.TxObjectRecordNew<>(obj)); + _newObjects.put(obj.getKey(), new TxRecord.TxObjectRecordNew<>(obj)); } @Override - public Collection> drainWrites() { - var ret = _objects; - _objects = new HashMap<>(); + public Collection> drainNewWrites() { + var ret = _newObjects; + _newObjects = new HashMap<>(); return ret.values(); } @Override - public Map> 
drainReads() { + public Map> reads() { return _source.getRead(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 233d6cd4..25f39804 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -7,7 +7,7 @@ import java.util.Map; // The transaction interface actually used by user code to retrieve objects public interface TransactionPrivate extends Transaction { - Collection> drainWrites(); + Collection> drainNewWrites(); - Map> drainReads(); + Map> reads(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index cd0b73b2..407e73da 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -1,11 +1,12 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.objects.common.runtime.JData; import java.io.Serializable; -public interface ChunkData extends JData, Serializable { +public interface ChunkData extends JDataRefcounted, Serializable { ByteString getData(); void setData(ByteString data); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index ed5cd96c..2b8e2054 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.objects.common.runtime.JObjectKey; +import java.util.Collection; import java.util.NavigableMap; public interface File extends FsNode { @@ -16,4 +17,9 @@ public interface File extends FsNode { long getSize(); void setSize(long size); + + @Override + default Collection collectRefsTo() { + return getChunks().values().stream().toList(); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java index 227c0775..7c09f7dc 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java @@ -1,10 +1,11 @@ package com.usatiuk.dhfs.files.objects; +import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.objects.common.runtime.JData; import java.io.Serializable; -public interface FsNode extends JData, Serializable { +public interface FsNode extends JDataRefcounted, Serializable { long getMode(); void setMode(long mode); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 33ccc5e6..67b51b87 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -77,6 +77,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { private ChunkData 
createChunk(ByteString bytes) { var newChunk = objectAllocator.create(ChunkData.class, new JObjectKey(UUID.randomUUID().toString())); newChunk.setData(bytes); + newChunk.setRefsFrom(List.of()); curTx.put(newChunk); return newChunk; } @@ -159,6 +160,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { f.setCtime(f.getMtime()); f.setSymlink(false); f.setChunks(new TreeMap<>()); + f.setRefsFrom(List.of()); curTx.put(f); try { @@ -622,6 +624,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { File f = objectAllocator.create(File.class, new JObjectKey(fuuid.toString())); f.setSymlink(true); + f.setRefsFrom(List.of()); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); f.getChunks().put(0L, newChunkData.getKey()); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java new file mode 100644 index 00000000..16f00b15 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; + +import java.util.Collection; +import java.util.List; + +public interface JDataRefcounted extends JData { + Collection getRefsFrom(); + + void setRefsFrom(Collection refs); + + default Collection collectRefsTo() { + return List.of(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java new file mode 100644 index 00000000..ab33a807 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -0,0 +1,63 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.collections4.CollectionUtils; + +import java.util.Set; + +@ApplicationScoped +public class RefcounterTxHook implements PreCommitTxHook { + @Inject + Transaction curTx; + + @Inject + ObjectAllocator alloc; + + @Override + public void onChange(JObjectKey key, JData old, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + var refOld = (JDataRefcounted) old; + + for (var newRef : CollectionUtils.subtract(refCur.collectRefsTo(), refOld.collectRefsTo())) { + var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null); + referenced.setRefsFrom(CollectionUtils.union(referenced.getRefsFrom(), Set.of(key))); + } + + for (var removedRef : CollectionUtils.subtract(refOld.collectRefsTo(), refCur.collectRefsTo())) { + var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null); + referenced.setRefsFrom(CollectionUtils.subtract(referenced.getRefsFrom(), Set.of(key))); + } + } + + @Override + public void onCreate(JObjectKey key, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + for (var newRef : refCur.collectRefsTo()) { + var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null); + referenced.setRefsFrom(CollectionUtils.union(referenced.getRefsFrom(), Set.of(key))); + } + } + + @Override + public void 
onDelete(JObjectKey key, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + + for (var removedRef : refCur.collectRefsTo()) { + var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null); + referenced.setRefsFrom(CollectionUtils.subtract(referenced.getRefsFrom(), Set.of(key))); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index cd8b1cbd..57c91913 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -294,8 +294,10 @@ public class JKleppmannTreeManager { if (curTx.get(JKleppmannTreeNode.class, getRootId()).isEmpty()) { var rootNode = objectAllocator.create(JKleppmannTreeNode.class, getRootId()); rootNode.setNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))); + rootNode.setRefsFrom(List.of()); curTx.put(rootNode); var trashNode = objectAllocator.create(JKleppmannTreeNode.class, getTrashId()); + trashNode.setRefsFrom(List.of()); trashNode.setNode(new TreeNode<>(getTrashId(), null, new JKleppmannTreeNodeMetaDirectory(""))); curTx.put(trashNode); } @@ -303,12 +305,12 @@ public class JKleppmannTreeManager { @Override public JObjectKey getRootId() { - return new JObjectKey(_treeName + "_jt_root"); + return new JObjectKey(_treeName.name() + "_jt_root"); } @Override public JObjectKey getTrashId() { - return new JObjectKey(_treeName + "_jt_trash"); + return new JObjectKey(_treeName.name() + "_jt_trash"); } @Override @@ -327,6 +329,7 @@ public class JKleppmannTreeManager { public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { var created = objectAllocator.create(JKleppmannTreeNode.class, node.getId()); created.setNode(node); + created.setRefsFrom(List.of()); curTx.put(created); return new JKleppmannTreeNodeWrapper(created); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 1f8d365a..eb8851bc 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -1,15 +1,28 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; +import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.kleppmanntree.TreeNode; -import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import java.io.Serializable; +import java.util.Collection; import java.util.UUID; +import java.util.stream.Stream; // FIXME: Ideally this is two classes? 
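// (How collectRefsTo() is consumed, sketched: RefcounterTxHook above diffs the
// outgoing edges of the old and new versions of a changed object, assuming the
// plain set semantics of org.apache.commons.collections4.CollectionUtils:
//
//   var added   = CollectionUtils.subtract(refCur.collectRefsTo(), refOld.collectRefsTo());
//   var removed = CollectionUtils.subtract(refOld.collectRefsTo(), refCur.collectRefsTo());
//   // each key in `added` gains this object's key in its refsFrom set,
//   // each key in `removed` loses it
//
// An object whose refsFrom set becomes empty is then a garbage-collection
// candidate, which is what the next patch's DeleterTxHook acts on.)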
-public interface JKleppmannTreeNode extends JData, Serializable { +public interface JKleppmannTreeNode extends JDataRefcounted, Serializable { TreeNode getNode(); void setNode(TreeNode node); + + @Override + default Collection collectRefsTo() { + return Stream.concat(getNode().getChildren().values().stream(), + switch (getNode().getMeta()) { + case JKleppmannTreeNodeMetaDirectory dir -> Stream.of(); + case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno()); + default -> throw new IllegalStateException("Unexpected value: " + getNode().getMeta()); + } + ).toList(); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index f4bc34ce..84ca109d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -1,18 +1,15 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; +import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.kleppmanntree.AtomicClock; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; -import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; -import java.util.Collection; -import java.util.HashMap; -import java.util.TreeMap; -import java.util.UUID; +import java.util.*; -public interface JKleppmannTreePersistentData extends JData { +public interface JKleppmannTreePersistentData extends JDataRefcounted { AtomicClock getClock(); void setClock(AtomicClock clock); @@ -50,4 +47,8 @@ public interface JKleppmannTreePersistentData extends JData { } } + @Override + default Collection collectRefsTo() { + return List.of(new JObjectKey(getKey().name() + "_jt_trash"), new JObjectKey(getKey().name() + "_jt_root")); + } } From dc19e1862d89c720e853e484b039dd8bcde64b93 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 29 Dec 2024 23:13:04 +0100 Subject: [PATCH 023/105] possibly working gc that breaks everything now --- .../usatiuk/kleppmanntree/KleppmannTree.java | 2 +- .../usatiuk/dhfs/objects/JObjectManager.java | 14 +++-- .../usatiuk/dhfs/objects/PreCommitTxHook.java | 4 ++ .../transaction/TransactionFactoryImpl.java | 11 ++-- .../dhfs/objects/transaction/TxRecord.java | 11 +++- .../usatiuk/dhfs/objects/DeleterTxHook.java | 62 +++++++++++++++++++ .../usatiuk/dhfs/objects/JDataRefcounted.java | 4 ++ .../dhfs/objects/RefcounterTxHook.java | 5 ++ .../jkleppmanntree/JKleppmannTreeManager.java | 5 +- 9 files changed, 104 insertions(+), 14 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index 42fb5de1..eef681ab 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -375,7 +375,7 @@ public class KleppmannTree, PeerIdT ex newParentNode.getNode().getChildren().put(effect.newMeta().getName(), effect.childId()); if (effect.newParentId().equals(_storage.getTrashId()) && - 
!Objects.equals(effect.newMeta().getName(), effect.childId())) + !Objects.equals(effect.newMeta().getName(), effect.childId().toString())) throw new IllegalArgumentException("Move to trash should have id of node as name"); node.getNode().setParent(effect.newParentId()); node.getNode().setMeta(effect.newMeta()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index e846596d..944c99db 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -37,8 +37,12 @@ public class JObjectManager { ObjectAllocator objectAllocator; @Inject TransactionFactory transactionFactory; - @Inject - Instance preCommitTxHooks; + + private final List _preCommitTxHooks; + + JObjectManager(Instance preCommitTxHooks) { + _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); + } private final DataLocker _storageReadLocker = new DataLocker(); private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); @@ -228,7 +232,7 @@ public class JObjectManager { case TxRecord.TxObjectRecordNew created -> { toPut.add(created); } - case TxRecord.TxObjectRecordDeleted deleted -> { + case TxRecord.TxObjectRecordDeleted deleted -> { toLock.add(deleted.getKey()); toDelete.add(deleted.getKey()); } @@ -264,7 +268,7 @@ public class JObjectManager { toUnlock.add(got.wrapper().lock.writeLock()::unlock); } - for (var hook : preCommitTxHooks) { + for (var hook : _preCommitTxHooks) { for (var entry : drained) { Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); switch (entry) { @@ -278,7 +282,7 @@ public class JObjectManager { hook.onCreate(created.getKey(), created.created()); } case TxRecord.TxObjectRecordDeleted deleted -> { - hook.onDelete(deleted.getKey(), deleted.original().data()); + hook.onDelete(deleted.getKey(), deleted.current()); } default -> throw new IllegalStateException("Unexpected value: " + entry); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java index 0319ceac..afc190dc 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java @@ -12,4 +12,8 @@ public interface PreCommitTxHook { default void onDelete(JObjectKey key, JData cur) { } + + default int getPriority() { + return 0; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 9368ca0f..43a4bbbb 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -82,25 +82,26 @@ public class TransactionFactoryImpl implements TransactionFactory { _newObjects.remove(key); } case TxRecord.TxObjectRecordCopyLock copyLockRecord -> { - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord.original())); - _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord.original())); + _objects.put(key, new 
TxRecord.TxObjectRecordDeleted<>(copyLockRecord)); + _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord)); } case TxRecord.TxObjectRecordOptimistic optimisticRecord -> { - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord.original())); - _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord.original())); + _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord)); + _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord)); } case TxRecord.TxObjectRecordDeleted deletedRecord -> { return; } default -> throw new IllegalStateException("Unexpected value: " + got); } + return; } var read = _source.get(JData.class, key).orElse(null); if (read == null) { return; } - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); + _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); // FIXME: _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 516fbd2b..7104706b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -35,7 +35,8 @@ public class TxRecord { } } - public record TxObjectRecordDeleted(TransactionObject original) implements TxObjectRecord { + public record TxObjectRecordDeleted(TransactionObject original, + T current) implements TxObjectRecord { @Override public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { return null; @@ -45,6 +46,14 @@ public class TxRecord { public JObjectKey getKey() { return original.data().getKey(); } + + public TxObjectRecordDeleted(TxObjectRecordWrite original) { + this(original.original(), original.copy().wrapped()); + } + + public TxObjectRecordDeleted(TransactionObject original) { + this(original, original.data()); + } } public record TxObjectRecordCopyLock(TransactionObject original, diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java new file mode 100644 index 00000000..c1ad75d3 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -0,0 +1,62 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.objects.alloc.runtime.ObjectAllocator; +import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class DeleterTxHook implements PreCommitTxHook { + @Inject + Transaction curTx; + + @Inject + ObjectAllocator alloc; + + private boolean canDelete(JDataRefcounted data) { + return !data.getFrozen() && data.getRefsFrom().isEmpty(); + } + + @Override + public void onChange(JObjectKey key, JData old, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + if (canDelete(refCur)) { + Log.trace("Deleting object on change: " + key); + curTx.delete(key); + } + } + + @Override + public void onCreate(JObjectKey key, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + if (canDelete(refCur)) { + Log.warn("Deleting object on creation: " + key); + 
curTx.delete(key); + } + } + + @Override + public void onDelete(JObjectKey key, JData cur) { + if (!(cur instanceof JDataRefcounted refCur)) { + return; + } + + if (!canDelete(refCur)) { + throw new IllegalStateException("Deleting object with refs: " + key); + } + } + + @Override + public int getPriority() { + return 200; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java index 16f00b15..848f59d2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java @@ -11,6 +11,10 @@ public interface JDataRefcounted extends JData { void setRefsFrom(Collection refs); + boolean getFrozen(); + + void setFrozen(boolean frozen); + default Collection collectRefsTo() { return List.of(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index ab33a807..47c3de43 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -60,4 +60,9 @@ public class RefcounterTxHook implements PreCommitTxHook { referenced.setRefsFrom(CollectionUtils.subtract(referenced.getRefsFrom(), Set.of(key))); } } + + @Override + public int getPriority() { + return 100; + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 57c91913..d443ab3e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -42,6 +42,7 @@ public class JKleppmannTreeManager { data.setQueues(new HashMap<>()); data.setLog(new TreeMap<>()); data.setPeerTimestampLog(new HashMap<>()); + data.setFrozen(true); curTx.put(data); } return new JKleppmannTree(data); @@ -81,8 +82,8 @@ public class JKleppmannTreeManager { _tree.move(newParent, newMeta, node); } - public void trash(JKleppmannTreeNodeMeta newMeta, JObjectKey node) { - _tree.move(_storageInterface.getTrashId(), newMeta.withName(node.name()), node); + public void trash(JKleppmannTreeNodeMeta newMeta, JObjectKey nodeKey) { + _tree.move(_storageInterface.getTrashId(), newMeta.withName(nodeKey.toString()), nodeKey); } // @Override From a0cad2a5f6e54aecd595508627e0a27666d6a1cb Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 31 Dec 2024 16:10:46 +0100 Subject: [PATCH 024/105] much simplified transactions with immutable objects --- .../usatiuk/dhfs/objects/JObjectManager.java | 323 ++++++++---------- .../dhfs/objects/TransactionManagerImpl.java | 21 +- .../transaction/ReadTrackingObjectSource.java | 61 +--- .../transaction/TransactionFactoryImpl.java | 94 ++--- .../transaction/TransactionObject.java | 6 +- .../transaction/TransactionObjectSource.java | 6 +- .../transaction/TransactionPrivate.java | 2 +- .../dhfs/objects/transaction/TxRecord.java | 72 +--- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 8 +- .../dhfs/objects/PreCommitTxHookTest.java | 37 +- .../dhfs/utils/AutoCloseableNoThrow.java | 6 + .../com/usatiuk/dhfs/utils/DataLocker.java | 25 +- 12 files changed, 266 insertions(+), 395 
deletions(-) create mode 100644 dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 944c99db..28e336f9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -1,11 +1,10 @@ package com.usatiuk.dhfs.objects; -import com.google.common.collect.Streams; import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.objects.transaction.*; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; -import com.usatiuk.dhfs.utils.VoidFn; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; @@ -20,8 +19,8 @@ import java.lang.ref.WeakReference; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; +import java.util.function.Function; // Manages all access to com.usatiuk.objects.common.runtime.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. @@ -44,15 +43,13 @@ public class JObjectManager { _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); } - private final DataLocker _storageReadLocker = new DataLocker(); + private final DataLocker _objLocker = new DataLocker(); private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); private final AtomicLong _txCounter = new AtomicLong(); private class JDataWrapper extends WeakReference { private static final Cleaner CLEANER = Cleaner.create(); - final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - public JDataWrapper(T referent) { super(referent); var key = referent.getKey(); @@ -65,15 +62,11 @@ public class JObjectManager { public String toString() { return "JDataWrapper{" + "ref=" + get() + - ", lock=" + lock + '}'; } } - private record WrapperRet(T obj, JDataWrapper wrapper) { - } - - private WrapperRet get(Class type, JObjectKey key) { + private T get(Class type, JObjectKey key) { while (true) { { var got = _objects.get(key); @@ -81,7 +74,7 @@ public class JObjectManager { if (got != null) { var ref = got.get(); if (type.isInstance(ref)) { - return new WrapperRet<>((T) ref, (JDataWrapper) got); + return type.cast(ref); } else if (ref == null) { _objects.remove(key, got); } else { @@ -91,59 +84,48 @@ public class JObjectManager { } //noinspection unused - try (var readLock = _storageReadLocker.lock(key)) { - var read = objectStorage.readObject(key).orElse(null); + try (var readLock = _objLocker.lock(key)) { + if (_objects.containsKey(key)) continue; + + var read = objectStorage.readObject(key) + .map(objectSerializer::deserialize) + .orElse(null); + if (read == null) return null; - var got = objectSerializer.deserialize(read); - - if (type.isInstance(got)) { - var wrapper = new JDataWrapper<>((T) got); - var old = _objects.putIfAbsent(key, wrapper); - if (old != null) continue; - return new WrapperRet<>((T) got, wrapper); - } else if (got == null) { - 
return null; + if (type.isInstance(read)) { + var wrapper = new JDataWrapper<>(type.cast(read)); + var old = _objects.put(key, wrapper); + assert old == null; + return type.cast(read); } else { - throw new IllegalArgumentException("Object type mismatch: " + got.getClass() + " vs " + type); + throw new IllegalArgumentException("Object type mismatch: " + read.getClass() + " vs " + type); } } } } - - private WrapperRet getLocked(Class type, JObjectKey key, boolean write) { - var read = get(type, key); - if (read == null) return null; - var lock = write ? read.wrapper().lock.writeLock() : read.wrapper().lock.readLock(); - lock.lock(); - while (true) { - try { - var readAgain = get(type, key); - if (readAgain == null) { - lock.unlock(); - return null; - } - if (!Objects.equals(read, readAgain)) { - lock.unlock(); - read = readAgain; - lock = write ? read.wrapper().lock.writeLock() : read.wrapper().lock.readLock(); - lock.lock(); - continue; - } - return read; - } catch (Throwable e) { - lock.unlock(); - throw e; - } - } - } - - private record TransactionObjectImpl - (T data, ReadWriteLock lock) + private record TransactionObjectNoLock + (Optional data) implements TransactionObject { } + private record TransactionObjectLocked + (Optional data, AutoCloseableNoThrow lock) + implements TransactionObject { + } + + private TransactionObjectNoLock getObj(Class type, JObjectKey key) { + var got = get(type, key); + return new TransactionObjectNoLock<>(Optional.ofNullable(got)); + } + + private TransactionObjectLocked getObjLock(Class type, JObjectKey key) { + var lock = _objLocker.lock(key); + var got = get(type, key); + return new TransactionObjectLocked<>(Optional.ofNullable(got), lock); + } + private class TransactionObjectSourceImpl implements TransactionObjectSource { private final long _txId; @@ -152,21 +134,26 @@ public class JObjectManager { } @Override - public Optional> get(Class type, JObjectKey key) { - var got = JObjectManager.this.get(type, key); - if (got == null) return Optional.empty(); - return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); + public TransactionObject get(Class type, JObjectKey key) { + return getObj(type, key); +// return getObj(type, key).map(got -> { +// if (got.data().getVersion() > _txId) { +// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); +// } +// return got; +// }); } @Override - public Optional> getWriteLocked(Class type, JObjectKey key) { - var got = JObjectManager.this.getLocked(type, key, true); - if (got == null) return Optional.empty(); - if (got.obj.getVersion() >= _txId) { - got.wrapper().lock.writeLock().unlock(); - throw new IllegalStateException("Serialization race"); - } - return Optional.of(new TransactionObjectImpl<>(got.obj(), got.wrapper().lock)); + public TransactionObject getWriteLocked(Class type, JObjectKey key) { + return getObjLock(type, key); +// return getObjLock(type, key).map(got -> { +// if (got.data().getVersion() > _txId) { +// got.lock.close(); +// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); +// } +// return got; +// }); } } @@ -200,168 +187,130 @@ public class JObjectManager { public void commit(TransactionPrivate tx) { Log.trace("Committing transaction " + tx.getId()); - // This also holds the weak references - var toUnlock = new LinkedList(); + var current = new LinkedHashMap>(); + var dependenciesLocked = new LinkedHashMap>(); + var toUnlock = new ArrayList(); - 
var toFlush = new LinkedList>(); - var toPut = new LinkedList>(); - var toDelete = new LinkedList(); - var dependencies = new LinkedList>(); + Consumer addDependency = + key -> { + dependenciesLocked.computeIfAbsent(key, k -> { + Log.trace("Adding dependency " + k.toString()); + var got = getObjLock(JData.class, k); + toUnlock.add(got.lock); + return got; + }); + }; + + Function getCurrent = + key -> switch (current.get(key)) { + case TxRecord.TxObjectRecordWrite write -> write.data(); + case TxRecord.TxObjectRecordDeleted deleted -> null; + case null -> { + var dep = dependenciesLocked.get(key); + if (dep == null) { + throw new IllegalStateException("No dependency for " + key); + } + yield dep.data.orElse(null); + } + default -> { + throw new IllegalStateException("Unexpected value: " + current.get(key)); + } + }; // For existing objects: // Check that their version is not higher than the version of transaction being committed // TODO: check deletions, inserts - try { Collection> drained; while (!(drained = tx.drainNewWrites()).isEmpty()) { - Log.trace("Commit iteration with " + drained.size() + " records"); var toLock = new ArrayList(); - for (var entry : drained) { - Log.trace("Processing write " + entry.toString()); - switch (entry) { - case TxRecord.TxObjectRecordCopyLock copy -> { - toUnlock.add(copy.original().lock().writeLock()::unlock); - toFlush.add(copy); - } - case TxRecord.TxObjectRecordOptimistic copy -> { - toLock.add(copy.original().data().getKey()); - toFlush.add(copy); - } - case TxRecord.TxObjectRecordNew created -> { - toPut.add(created); - } - case TxRecord.TxObjectRecordDeleted deleted -> { - toLock.add(deleted.getKey()); - toDelete.add(deleted.getKey()); - } - default -> throw new IllegalStateException("Unexpected value: " + entry); - } - } + Log.trace("Commit iteration with " + drained.size() + " records"); - for (var entry : tx.reads().entrySet()) { - Log.trace("Processing read " + entry.toString()); - switch (entry.getValue()) { - case ReadTrackingObjectSource.TxReadObjectNone none -> { - // TODO: Check this - } - case ReadTrackingObjectSource.TxReadObjectSome(var obj) -> { - toLock.add(obj.data().getKey()); - dependencies.add(obj); - } - default -> throw new IllegalStateException("Unexpected value: " + entry); - } - } - - toLock.sort(Comparator.comparingInt(System::identityHashCode)); - - for (var key : toLock) { - Log.trace("Locking " + key.toString()); - - var got = getLocked(JData.class, key, true); - - if (got == null) { - throw new IllegalStateException("Object " + key + " not found"); - } - - toUnlock.add(got.wrapper().lock.writeLock()::unlock); - } + drained.stream() + .map(TxRecord.TxObjectRecord::key) + .sorted(Comparator.comparing(JObjectKey::toString)) + .forEach(addDependency); for (var hook : _preCommitTxHooks) { for (var entry : drained) { Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); switch (entry) { - case TxRecord.TxObjectRecordCopyLock copy -> { - hook.onChange(copy.getKey(), copy.original().data(), copy.copy().wrapped()); + case TxRecord.TxObjectRecordWrite write -> { + var oldObj = getCurrent.apply(write.key()); + if (oldObj == null) { + hook.onCreate(write.key(), write.data()); + } else { + hook.onChange(write.key(), oldObj, write.data()); + } } - case TxRecord.TxObjectRecordOptimistic copy -> { - hook.onChange(copy.getKey(), copy.original().data(), copy.copy().wrapped()); - } - case TxRecord.TxObjectRecordNew created -> { - hook.onCreate(created.getKey(), created.created()); - } - case 
TxRecord.TxObjectRecordDeleted deleted -> { - hook.onDelete(deleted.getKey(), deleted.current()); + case TxRecord.TxObjectRecordDeleted deleted -> { + hook.onDelete(deleted.key(), getCurrent.apply(deleted.key())); } default -> throw new IllegalStateException("Unexpected value: " + entry); } + current.put(entry.key(), entry); } } } - for (var dep : dependencies) { - Log.trace("Checking dependency " + dep.toString()); - var current = _objects.get(dep.data().getKey()).get(); - - // Check that the object we have locked is really the one in the map - // Note that current can be null, not only if it doesn't exist, but - // also for example in the case when it was changed and then garbage collected - if (dep.data() != current) { - throw new IllegalStateException("Serialization hazard: " + dep.data() + " vs " + current); - } - - if (current.getVersion() >= tx.getId()) { - throw new IllegalStateException("Serialization hazard: " + current.getVersion() + " vs " + tx.getId()); + // FIXME: lock leak + for (var read : tx.reads().entrySet()) { + addDependency.accept(read.getKey()); + if (read.getValue() instanceof TransactionObjectLocked locked) { + toUnlock.add(locked.lock); } } - for (var put : toPut) { - Log.trace("Putting new object " + put.toString()); - var wrapper = new JDataWrapper<>(put.created()); - wrapper.lock.writeLock().lock(); - var old = _objects.putIfAbsent(put.created().getKey(), wrapper); - if (old != null) - throw new IllegalStateException("Object already exists: " + old.get()); - toUnlock.add(wrapper.lock.writeLock()::unlock); - } + for (var dep : dependenciesLocked.entrySet()) { + Log.trace("Checking dependency " + dep.getKey()); - for (var record : toFlush) { - if (!record.copy().isModified()) { - Log.trace("Not changed " + record.toString()); - continue; + if (dep.getValue().data.isEmpty()) continue; + + if (dep.getValue().data.get().getVersion() >= tx.getId()) { + throw new IllegalStateException("Serialization hazard: " + dep.getValue().data.get().getVersion() + " vs " + tx.getId()); } - - Log.trace("Flushing changed " + record.toString()); - var current = _objects.get(record.original().data().getKey()); - - var newWrapper = new JDataWrapper<>(record.copy().wrapped()); - newWrapper.lock.writeLock().lock(); - if (!_objects.replace(record.copy().wrapped().getKey(), current, newWrapper)) { - assert false; // Should not happen, as the object is locked - throw new IllegalStateException("Object changed during transaction after locking: " + current.get() + " vs " + record.copy().wrapped()); - } - toUnlock.add(newWrapper.lock.writeLock()::unlock); } Log.tracef("Flushing transaction %d to storage", tx.getId()); - var written = Streams.concat(toFlush.stream().map(f -> f.copy().wrapped()), - toPut.stream().map(TxRecord.TxObjectRecordNew::created)).toList(); + var toDelete = new ArrayList(); + var toWrite = new ArrayList(); - // Really flushing to storage - written.forEach(obj -> { - Log.trace("Flushing object " + obj.getKey()); - assert obj.getVersion() == tx.getId(); - var key = obj.getKey(); - var data = objectSerializer.serialize(obj); - objectStorage.writeObject(key, data); - }); + for (var action : current.entrySet()) { + switch (action.getValue()) { + case TxRecord.TxObjectRecordWrite write -> { + Log.trace("Flushing object " + action.getKey()); + toWrite.add(action.getKey()); + var data = objectSerializer.serialize(write.data()); + objectStorage.writeObject(action.getKey(), data); + _objects.put(action.getKey(), new JDataWrapper<>(write.data())); + } + case 
TxRecord.TxObjectRecordDeleted deleted -> { + Log.trace("Deleting object " + action.getKey()); + toDelete.add(action.getKey()); + _objects.remove(action.getKey()); + } + default -> { + throw new IllegalStateException("Unexpected value: " + action.getValue()); + } + } + } Log.tracef("Committing transaction %d to storage", tx.getId()); - objectStorage.commitTx(new SimpleTxManifest(written.stream().map(JData::getKey).toList(), toDelete)); - - for (var del : toDelete) { - _objects.remove(del); - } - } catch (Throwable t) { + objectStorage.commitTx(new SimpleTxManifest(toWrite, toDelete)); + } catch ( + Throwable t) { Log.error("Error when committing transaction", t); throw t; } finally { for (var unlock : toUnlock) { - unlock.apply(); + unlock.close(); } } } + + public void rollback(TransactionPrivate tx) { + } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index 9e8d9433..91ee6d50 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -2,7 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; -import com.usatiuk.dhfs.objects.transaction.TxRecord; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -44,16 +43,18 @@ public class TransactionManagerImpl implements TransactionManager { @Override public void rollback() { - var tx = _currentTransaction.get(); - // Works only before commit was called - for (var o : tx.drainNewWrites()) { - switch (o) { - case TxRecord.TxObjectRecordCopyLock r -> r.original().lock().writeLock().unlock(); - default -> { - } - } + if (_currentTransaction.get() == null) { + throw new IllegalStateException("No transaction started"); + } + + try { + jObjectManager.rollback(_currentTransaction.get()); + } catch (Throwable e) { + Log.error("Transaction rollback failed", e); + throw e; + } finally { + _currentTransaction.remove(); } - _currentTransaction.remove(); } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java index a8337b95..ff8931f9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java @@ -6,78 +6,53 @@ import com.usatiuk.objects.common.runtime.JObjectKey; import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.Optional; public class ReadTrackingObjectSource implements TransactionObjectSource { private final TransactionObjectSource _delegate; - public interface TxReadObject {} - - public record TxReadObjectNone() implements TxReadObject {} - - public record TxReadObjectSome(TransactionObject obj) implements TxReadObject {} - - private final Map> _readSet = new HashMap<>(); + private final Map> _readSet = new HashMap<>(); public ReadTrackingObjectSource(TransactionObjectSource delegate) { _delegate = delegate; } - public Map> getRead() { + public Map> getRead() { return Collections.unmodifiableMap(_readSet); } 
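A note on the shape above: ReadTrackingObjectSource now memoizes the raw TransactionObject per key instead of the old Some/None wrappers, and JObjectManager.commit() later walks this read set to re-lock each key and reject the transaction on a version conflict. A minimal, self-contained sketch of that pattern, using simplified stand-in types (Versioned, Source) rather than the real DHFS interfaces:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Illustrative stand-ins; not the actual DHFS types.
record Versioned(Object data, long version) {}

interface Source {
    Optional<Versioned> get(String key);
}

// Memoizes the first snapshot seen per key, so a transaction observes a
// stable view and the commit path can validate every dependency exactly once.
class ReadTrackingSource implements Source {
    private final Source delegate;
    private final Map<String, Optional<Versioned>> readSet = new HashMap<>();

    ReadTrackingSource(Source delegate) {
        this.delegate = delegate;
    }

    @Override
    public Optional<Versioned> get(String key) {
        return readSet.computeIfAbsent(key, delegate::get);
    }

    // Mirrors the "Serialization hazard" check in JObjectManager.commit():
    // if a dependency's current version is at or above this transaction's id,
    // another transaction committed underneath us and we must abort.
    void validate(long txId, Source current) {
        for (var key : readSet.keySet()) {
            long v = current.get(key).map(Versioned::version).orElse(-1L);
            if (v >= txId) {
                throw new IllegalStateException("Serialization hazard for " + key);
            }
        }
    }
}

At commit time the manager's dependency pass is essentially validate() plus per-key locking in sorted key order, with writes flushed to storage only after every check passes.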
@Override - public Optional> get(Class type, JObjectKey key) { + public TransactionObject get(Class type, JObjectKey key) { var got = _readSet.get(key); if (got == null) { var read = _delegate.get(type, key); - if (read.isPresent()) { - _readSet.put(key, new TxReadObjectSome<>(read.get())); - } else { - _readSet.put(key, new TxReadObjectNone<>()); - } + _readSet.put(key, read); return read; } - return switch (got) { - case TxReadObjectNone none -> Optional.empty(); - case TxReadObjectSome some -> { - if (type.isInstance(some.obj().data())) { - yield Optional.of((TransactionObject) some.obj()); - } else { - yield Optional.empty(); - } - } - default -> throw new IllegalStateException("Unexpected value: " + got); - }; + got.data().ifPresent(data -> { + if (!type.isInstance(data)) + throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass()); + }); + + return (TransactionObject) got; } @Override - public Optional> getWriteLocked(Class type, JObjectKey key) { + public TransactionObject getWriteLocked(Class type, JObjectKey key) { var got = _readSet.get(key); if (got == null) { var read = _delegate.getWriteLocked(type, key); - if (read.isPresent()) { - _readSet.put(key, new TxReadObjectSome<>(read.get())); - } else { - _readSet.put(key, new TxReadObjectNone<>()); - } + _readSet.put(key, read); return read; } - return switch (got) { - case TxReadObjectNone none -> Optional.empty(); - case TxReadObjectSome some -> { - if (type.isInstance(some.obj().data())) { - yield Optional.of((TransactionObject) some.obj()); - } else { - yield Optional.empty(); - } - } - default -> throw new IllegalStateException("Unexpected value: " + got); - }; + got.data().ifPresent(data -> { + if (!type.isInstance(data)) + throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass()); + }); + + return (TransactionObject) got; } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 43a4bbbb..fda96766 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -23,8 +23,8 @@ public class TransactionFactoryImpl implements TransactionFactory { private final long _id; private final ReadTrackingObjectSource _source; - private Map> _objects = new HashMap<>(); - private Map> _newObjects = new HashMap<>(); + private final Map> _writes = new HashMap<>(); + private Map> _newWrites = new HashMap<>(); private TransactionImpl(long id, TransactionObjectSource source) { _id = id; @@ -33,97 +33,53 @@ public class TransactionFactoryImpl implements TransactionFactory { @Override public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { - var got = _objects.get(key); - if (got != null) { - var compatible = got.getIfStrategyCompatible(key, strategy); - if (compatible == null) { - throw new IllegalArgumentException("Locking strategy mismatch"); - } - if (!type.isInstance(compatible)) { - throw new IllegalArgumentException("Object type mismatch"); - } - return Optional.of(type.cast(compatible)); - } - - switch (strategy) { - case OPTIMISTIC: { - var read = _source.get(type, key).orElse(null); - if (read == null) { - return Optional.empty(); - } - var copy = objectAllocator.copy(read.data()); - 
_objects.put(key, new TxRecord.TxObjectRecordOptimistic<>(read, copy)); - _newObjects.put(key, new TxRecord.TxObjectRecordOptimistic<>(read, copy)); - return Optional.of(copy.wrapped()); - } - case WRITE: { - var locked = _source.getWriteLocked(type, key).orElse(null); - if (locked == null) { - return Optional.empty(); - } - var copy = objectAllocator.copy(locked.data()); - _objects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); - _newObjects.put(key, new TxRecord.TxObjectRecordCopyLock<>(locked, copy)); - return Optional.of(copy.wrapped()); - } - default: - throw new IllegalArgumentException("Unknown locking strategy"); - } + return switch (strategy) { + case OPTIMISTIC -> _source.get(type, key).data(); + case WRITE -> _source.getWriteLocked(type, key).data(); + }; } @Override public void delete(JObjectKey key) { +// get(JData.class, key, LockingStrategy.OPTIMISTIC); + // FIXME - var got = _objects.get(key); + var got = _writes.get(key); if (got != null) { switch (got) { - case TxRecord.TxObjectRecordNew created -> { - _objects.remove(key); - _newObjects.remove(key); - } - case TxRecord.TxObjectRecordCopyLock copyLockRecord -> { - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord)); - _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(copyLockRecord)); - } - case TxRecord.TxObjectRecordOptimistic optimisticRecord -> { - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord)); - _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(optimisticRecord)); - } - case TxRecord.TxObjectRecordDeleted deletedRecord -> { + case TxRecord.TxObjectRecordDeleted deleted -> { return; } - default -> throw new IllegalStateException("Unexpected value: " + got); + default -> { + } } - return; } - - var read = _source.get(JData.class, key).orElse(null); - if (read == null) { - return; - } - _objects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); // FIXME: - _newObjects.put(key, new TxRecord.TxObjectRecordDeleted<>(read)); +// +// var read = _source.get(JData.class, key).orElse(null); +// if (read == null) { +// return; +// } + _writes.put(key, new TxRecord.TxObjectRecordDeleted(key)); // FIXME: + _newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key)); } @Override public void put(JData obj) { - if (_objects.containsKey(obj.getKey())) { - throw new IllegalArgumentException("Object already exists in transaction"); - } +// get(JData.class, obj.getKey(), LockingStrategy.OPTIMISTIC); - _objects.put(obj.getKey(), new TxRecord.TxObjectRecordNew<>(obj)); - _newObjects.put(obj.getKey(), new TxRecord.TxObjectRecordNew<>(obj)); + _writes.put(obj.getKey(), new TxRecord.TxObjectRecordWrite<>(obj)); + _newWrites.put(obj.getKey(), new TxRecord.TxObjectRecordWrite<>(obj)); } @Override public Collection> drainNewWrites() { - var ret = _newObjects; - _newObjects = new HashMap<>(); + var ret = _newWrites; + _newWrites = new HashMap<>(); return ret.values(); } @Override - public Map> reads() { + public Map> reads() { return _source.getRead(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java index cd5dc7e6..0d87926e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -2,10 +2,8 @@ package com.usatiuk.dhfs.objects.transaction; import 
com.usatiuk.objects.common.runtime.JData; -import java.util.concurrent.locks.ReadWriteLock; +import java.util.Optional; public interface TransactionObject { - T data(); - - ReadWriteLock lock(); + Optional data(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index 14835797..acfae1ca 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -3,10 +3,8 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; -import java.util.Optional; - public interface TransactionObjectSource { - Optional> get(Class type, JObjectKey key); + TransactionObject get(Class type, JObjectKey key); - Optional> getWriteLocked(Class type, JObjectKey key); + TransactionObject getWriteLocked(Class type, JObjectKey key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 25f39804..c2e147ed 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -9,5 +9,5 @@ import java.util.Map; public interface TransactionPrivate extends Transaction { Collection> drainNewWrites(); - Map> reads(); + Map> reads(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 7104706b..7600fc31 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -1,80 +1,20 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.alloc.runtime.ChangeTrackingJData; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; public class TxRecord { public interface TxObjectRecord { - T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy); - - JObjectKey getKey(); + JObjectKey key(); } - public interface TxObjectRecordWrite extends TxObjectRecord { - TransactionObject original(); - - ChangeTrackingJData copy(); - - default JObjectKey getKey() { - return original().data().getKey(); + public record TxObjectRecordWrite(JData data) implements TxObjectRecord { + @Override + public JObjectKey key() { + return data.getKey(); } } - public record TxObjectRecordNew(T created) implements TxObjectRecord { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC) - return created; - return null; - } - - @Override - public JObjectKey getKey() { - return created.getKey(); - } - } - - public record TxObjectRecordDeleted(TransactionObject original, - T current) implements TxObjectRecord { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - return null; - } - - @Override - public JObjectKey getKey() { - return original.data().getKey(); - } - - 
public TxObjectRecordDeleted(TxObjectRecordWrite original) { - this(original.original(), original.copy().wrapped()); - } - - public TxObjectRecordDeleted(TransactionObject original) { - this(original, original.data()); - } - } - - public record TxObjectRecordCopyLock(TransactionObject original, - ChangeTrackingJData copy) - implements TxObjectRecordWrite { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC) - return copy.wrapped(); - return null; - } - } - - public record TxObjectRecordOptimistic(TransactionObject original, - ChangeTrackingJData copy) - implements TxObjectRecordWrite { - @Override - public T getIfStrategyCompatible(JObjectKey key, LockingStrategy strategy) { - if (strategy == LockingStrategy.WRITE || strategy == LockingStrategy.OPTIMISTIC) - return copy.wrapped(); - return null; - } + public record TxObjectRecordDeleted(JObjectKey key) implements TxObjectRecord { } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index d7262c13..d01d5cc5 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -86,15 +86,17 @@ public class ObjectsTest { curTx.put(newParent); txm.commit(); } - Assertions.assertThrows(Exception.class, () -> txm.run(() -> { + { + txm.begin(); var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); newParent.setLastName("John2"); curTx.put(newParent); - })); + txm.commit(); + } { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); + Assertions.assertEquals("John2", parent.getLastName()); txm.commit(); } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 07325b76..25e9a194 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -44,7 +44,7 @@ public class PreCommitTxHookTest { { txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); Assertions.assertEquals("John", parent.getLastName()); txm.commit(); } @@ -98,9 +98,9 @@ public class PreCommitTxHookTest { { txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); - parent.setLastName("John changed"); + var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit")); + newParent.setLastName("John changed"); + curTx.put(newParent); txm.commit(); } @@ -113,4 +113,33 @@ public class PreCommitTxHookTest { Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue()); } + @Test + void editObjectWithGet() { + { + txm.begin(); + var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit2")); + newParent.setLastName("John"); + curTx.put(newParent); + txm.commit(); + } + + { + txm.begin(); + var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null); + Assertions.assertEquals("John", parent.getLastName()); + var newParent = 
alloc.create(Parent.class, new JObjectKey("ParentEdit2")); + newParent.setLastName("John changed"); + curTx.put(newParent); + txm.commit(); + } + + ArgumentCaptor dataCaptorOld = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); + Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).getLastName()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).getLastName()); + Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue()); + } + } diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java new file mode 100644 index 00000000..29ec47ca --- /dev/null +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/AutoCloseableNoThrow.java @@ -0,0 +1,6 @@ +package com.usatiuk.dhfs.utils; + +public interface AutoCloseableNoThrow extends AutoCloseable { + @Override + void close(); +} diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java index ecb1288a..35e882da 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java @@ -1,41 +1,58 @@ package com.usatiuk.dhfs.utils; +import io.quarkus.logging.Log; + +import java.lang.ref.Cleaner; import java.util.concurrent.ConcurrentHashMap; public class DataLocker { private static class LockTag { boolean released = false; + final Thread owner = Thread.currentThread(); } private final ConcurrentHashMap _locks = new ConcurrentHashMap<>(); - public class Lock implements AutoCloseable { + private class Lock implements AutoCloseableNoThrow { private final Object _key; private final LockTag _tag; + private static final Cleaner CLEANER = Cleaner.create(); public Lock(Object key, LockTag tag) { _key = key; _tag = tag; + CLEANER.register(this, () -> { + if (!tag.released) { + Log.error("Lock collected without release: " + key); + } + }); } @Override public void close() { synchronized (_tag) { _tag.released = true; - _tag.notifyAll(); + _tag.notify(); _locks.remove(_key, _tag); } } } - public Lock lock(Object data) { + private static final AutoCloseableNoThrow DUMMY_LOCK = () -> { + }; + + public AutoCloseableNoThrow lock(Object data) { while (true) { try { var tag = _locks.get(data); if (tag != null) { synchronized (tag) { - if (!tag.released) + if (!tag.released) { + if (tag.owner == Thread.currentThread()) { + return DUMMY_LOCK; + } tag.wait(); + } continue; } } From 2a8fbc72de2781d8c99f84722fc4a45f29ed0d94 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 31 Dec 2024 18:05:19 +0100 Subject: [PATCH 025/105] slightly hacky versioning --- .../dhfs/objects/JDataVersionedWrapper.java | 11 +++++ .../usatiuk/dhfs/objects/JObjectManager.java | 40 +++++++++---------- .../dhfs/objects/JavaDataSerializer.java | 7 ++-- .../dhfs/objects/ObjectSerializer.java | 3 +- .../transaction/TransactionFactoryImpl.java | 5 ++- .../transaction/TransactionObject.java | 3 +- 6 files changed, 40 insertions(+), 29 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java new file mode 100644 index 00000000..5194b873 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.objects.common.runtime.JData; +import jakarta.annotation.Nonnull; +import lombok.Builder; + +import java.io.Serializable; + +@Builder +public record JDataVersionedWrapper(@Nonnull T data, long version) implements Serializable { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 28e336f9..48b18ca9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -31,7 +31,7 @@ public class JObjectManager { @Inject ObjectPersistentStore objectStorage; @Inject - ObjectSerializer objectSerializer; + ObjectSerializer objectSerializer; @Inject ObjectAllocator objectAllocator; @Inject @@ -47,12 +47,12 @@ public class JObjectManager { private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); private final AtomicLong _txCounter = new AtomicLong(); - private class JDataWrapper extends WeakReference { + private class JDataWrapper extends WeakReference> { private static final Cleaner CLEANER = Cleaner.create(); - public JDataWrapper(T referent) { + public JDataWrapper(JDataVersionedWrapper referent) { super(referent); - var key = referent.getKey(); + var key = referent.data().getKey(); CLEANER.register(referent, () -> { _objects.remove(key, this); }); @@ -66,17 +66,17 @@ public class JObjectManager { } } - private T get(Class type, JObjectKey key) { + private JDataVersionedWrapper get(Class type, JObjectKey key) { while (true) { { var got = _objects.get(key); if (got != null) { var ref = got.get(); - if (type.isInstance(ref)) { - return type.cast(ref); - } else if (ref == null) { + if (ref == null) { _objects.remove(key, got); + } else if (type.isInstance(ref.data())) { + return (JDataVersionedWrapper) ref; } else { throw new IllegalArgumentException("Object type mismatch: " + ref.getClass() + " vs " + type); } @@ -93,11 +93,11 @@ public class JObjectManager { if (read == null) return null; - if (type.isInstance(read)) { - var wrapper = new JDataWrapper<>(type.cast(read)); + if (type.isInstance(read.data())) { + var wrapper = new JDataWrapper<>((JDataVersionedWrapper) read); var old = _objects.put(key, wrapper); assert old == null; - return type.cast(read); + return read; } else { throw new IllegalArgumentException("Object type mismatch: " + read.getClass() + " vs " + type); } @@ -106,12 +106,12 @@ public class JObjectManager { } private record TransactionObjectNoLock - (Optional data) + (Optional> data) implements TransactionObject { } private record TransactionObjectLocked - (Optional data, AutoCloseableNoThrow lock) + (Optional> data, AutoCloseableNoThrow lock) implements TransactionObject { } @@ -210,7 +210,7 @@ public class JObjectManager { if (dep == null) { throw new IllegalStateException("No dependency for " + key); } - yield dep.data.orElse(null); + yield dep.data.map(JDataVersionedWrapper::data).orElse(null); } default -> { throw new IllegalStateException("Unexpected value: " + current.get(key)); @@ -254,7 +254,6 @@ public class JObjectManager 
{ } } - // FIXME: lock leak for (var read : tx.reads().entrySet()) { addDependency.accept(read.getKey()); if (read.getValue() instanceof TransactionObjectLocked locked) { @@ -265,10 +264,10 @@ public class JObjectManager { for (var dep : dependenciesLocked.entrySet()) { Log.trace("Checking dependency " + dep.getKey()); - if (dep.getValue().data.isEmpty()) continue; + if (dep.getValue().data().isEmpty()) continue; - if (dep.getValue().data.get().getVersion() >= tx.getId()) { - throw new IllegalStateException("Serialization hazard: " + dep.getValue().data.get().getVersion() + " vs " + tx.getId()); + if (dep.getValue().data().get().version() >= tx.getId()) { + throw new IllegalStateException("Serialization hazard: " + dep.getValue().data().get().version() + " vs " + tx.getId()); } } @@ -282,9 +281,10 @@ public class JObjectManager { case TxRecord.TxObjectRecordWrite write -> { Log.trace("Flushing object " + action.getKey()); toWrite.add(action.getKey()); - var data = objectSerializer.serialize(write.data()); + var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); + var data = objectSerializer.serialize(wrapped); objectStorage.writeObject(action.getKey(), data); - _objects.put(action.getKey(), new JDataWrapper<>(write.data())); + _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); } case TxRecord.TxObjectRecordDeleted deleted -> { Log.trace("Deleting object " + action.getKey()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java index d7ab1597..a42ebc07 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JavaDataSerializer.java @@ -3,20 +3,19 @@ package com.usatiuk.dhfs.objects; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.objects.common.runtime.JData; import jakarta.enterprise.context.ApplicationScoped; import java.io.Serializable; @ApplicationScoped -public class JavaDataSerializer implements ObjectSerializer { +public class JavaDataSerializer implements ObjectSerializer { @Override - public ByteString serialize(JData obj) { + public ByteString serialize(JDataVersionedWrapper obj) { return SerializationHelper.serialize((Serializable) obj); } @Override - public JData deserialize(ByteString data) { + public JDataVersionedWrapper deserialize(ByteString data) { return SerializationHelper.deserialize(data.toByteArray()); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java index e5922c67..078dd90f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ObjectSerializer.java @@ -1,9 +1,8 @@ package com.usatiuk.dhfs.objects; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.runtime.JData; -public interface ObjectSerializer { +public interface ObjectSerializer { ByteString serialize(T obj); T deserialize(ByteString data); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index fda96766..4384a229 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; @@ -34,8 +35,8 @@ public class TransactionFactoryImpl implements TransactionFactory { @Override public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { return switch (strategy) { - case OPTIMISTIC -> _source.get(type, key).data(); - case WRITE -> _source.getWriteLocked(type, key).data(); + case OPTIMISTIC -> _source.get(type, key).data().map(JDataVersionedWrapper::data); + case WRITE -> _source.getWriteLocked(type, key).data().map(JDataVersionedWrapper::data); }; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java index 0d87926e..1c9fe912 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -1,9 +1,10 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.objects.common.runtime.JData; import java.util.Optional; public interface TransactionObject { - Optional data(); + Optional> data(); } From f869178b0f5bc99298f92b6422783f9ebef38543 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 31 Dec 2024 18:17:12 +0100 Subject: [PATCH 026/105] seemingly working record data classes --- dhfs-parent/objects-alloc/deployment/pom.xml | 65 --- .../alloc/deployment/JDataFieldInfo.java | 6 - .../alloc/deployment/JDataIndexBuildItem.java | 12 - .../alloc/deployment/JDataInfoBuildItem.java | 16 - .../deployment/ObjectsAllocProcessor.java | 375 ------------------ .../alloc/test/ObjectsAllocDevModeTest.java | 25 -- .../objects/alloc/test/ObjectsAllocTest.java | 26 -- .../objects-alloc/integration-tests/pom.xml | 107 ----- .../alloc/it/DummyVersionProvider.java | 20 - .../objects/alloc/it/TestJDataAssorted.java | 18 - .../objects/alloc/it/TestJDataEmpty.java | 6 - .../src/main/resources/application.properties | 1 - .../objects/alloc/it/ObjectAllocIT.java | 96 ----- dhfs-parent/objects-alloc/pom.xml | 23 -- dhfs-parent/objects-alloc/runtime/pom.xml | 64 --- .../alloc/runtime/ChangeTrackingJData.java | 9 - .../alloc/runtime/ObjectAllocator.java | 12 - .../resources/META-INF/quarkus-extension.yaml | 9 - .../usatiuk/objects/common/runtime/JData.java | 8 +- .../runtime/JDataAllocVersionProvider.java | 5 - .../objects/common/runtime/JObjectKey.java | 3 + dhfs-parent/objects/pom.xml | 11 - .../usatiuk/dhfs/objects/JObjectManager.java | 5 +- .../transaction/TransactionFactoryImpl.java | 8 +- ...TransactionObjectAllocVersionProvider.java | 16 - .../dhfs/objects/transaction/TxRecord.java | 2 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 64 ++- .../dhfs/objects/PreCommitTxHookTest.java | 42 +- .../com/usatiuk/dhfs/objects/data/Kid.java | 10 +- .../com/usatiuk/dhfs/objects/data/Parent.java | 13 +- dhfs-parent/pom.xml | 1 - 31 files changed, 62 insertions(+), 1016 deletions(-) delete mode 100644 dhfs-parent/objects-alloc/deployment/pom.xml delete 
mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java delete mode 100644 dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/pom.xml delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties delete mode 100644 dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java delete mode 100644 dhfs-parent/objects-alloc/pom.xml delete mode 100644 dhfs-parent/objects-alloc/runtime/pom.xml delete mode 100644 dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java delete mode 100644 dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java delete mode 100644 dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml delete mode 100644 dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java diff --git a/dhfs-parent/objects-alloc/deployment/pom.xml b/dhfs-parent/objects-alloc/deployment/pom.xml deleted file mode 100644 index e2107f92..00000000 --- a/dhfs-parent/objects-alloc/deployment/pom.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-alloc-parent - 1.0-SNAPSHOT - - objects-alloc-deployment - DHFS objects allocation - Deployment - - - - io.quarkus - quarkus-arc-deployment - - - com.usatiuk - objects-alloc - ${project.version} - - - com.usatiuk - objects-common-deployment - ${project.version} - - - io.quarkus - quarkus-junit5-internal - test - - - org.apache.commons - commons-collections4 - - - org.apache.commons - commons-lang3 - - - - - - - maven-compiler-plugin - - - default-compile - - - - io.quarkus - quarkus-extension-processor - ${quarkus.platform.version} - - - - - - - - - diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java deleted file mode 100644 index 198a4cfa..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataFieldInfo.java +++ /dev/null @@ -1,6 +0,0 @@ -package com.usatiuk.objects.alloc.deployment; - -import org.jboss.jandex.DotName; - -public record 
JDataFieldInfo(String name, DotName type) { -} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java deleted file mode 100644 index a94e26fb..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataIndexBuildItem.java +++ /dev/null @@ -1,12 +0,0 @@ -package com.usatiuk.objects.alloc.deployment; - -import io.quarkus.builder.item.MultiBuildItem; -import org.jboss.jandex.ClassInfo; - -public final class JDataIndexBuildItem extends MultiBuildItem { - public final ClassInfo jData; - - public JDataIndexBuildItem(ClassInfo jData) { - this.jData = jData; - } -} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java deleted file mode 100644 index 3b90a98a..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/JDataInfoBuildItem.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.usatiuk.objects.alloc.deployment; - -import io.quarkus.builder.item.MultiBuildItem; -import org.jboss.jandex.ClassInfo; - -import java.util.Map; - -public final class JDataInfoBuildItem extends MultiBuildItem { - public final ClassInfo klass; - public final Map fields; - - public JDataInfoBuildItem(ClassInfo klass, Map fields) { - this.klass = klass; - this.fields = fields; - } -} diff --git a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java b/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java deleted file mode 100644 index 36ebea42..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/main/java/com/usatiuk/objects/alloc/deployment/ObjectsAllocProcessor.java +++ /dev/null @@ -1,375 +0,0 @@ -package com.usatiuk.objects.alloc.deployment; - -import com.usatiuk.objects.alloc.runtime.ChangeTrackingJData; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; -import com.usatiuk.objects.common.runtime.JObjectKey; -import io.quarkus.arc.deployment.GeneratedBeanBuildItem; -import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor; -import io.quarkus.deployment.GeneratedClassGizmoAdaptor; -import io.quarkus.deployment.annotations.BuildProducer; -import io.quarkus.deployment.annotations.BuildStep; -import io.quarkus.deployment.builditem.ApplicationIndexBuildItem; -import io.quarkus.deployment.builditem.GeneratedClassBuildItem; -import io.quarkus.gizmo.*; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; -import org.apache.commons.lang3.tuple.Pair; -import org.jboss.jandex.ClassInfo; -import org.jboss.jandex.DotName; -import org.jboss.jandex.MethodInfo; - -import java.io.Serializable; -import java.util.*; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.lang.reflect.Modifier.*; - -class ObjectsAllocProcessor { - @BuildStep - void collectJDatas(BuildProducer producer, ApplicationIndexBuildItem jandex) { - var jdatas = jandex.getIndex().getAllKnownSubinterfaces(JData.class); - - // Collect the leaves - for (var jdata : jdatas) { - 
System.out.println("Found JData: " + jdata.name()); - if (jandex.getIndex().getAllKnownSubinterfaces(jdata.name()).isEmpty()) { - System.out.println("Found JData leaf: " + jdata.name()); - producer.produce(new JDataIndexBuildItem(jdata)); - } - } - } - - private static final String KEY_NAME = "key"; - private static final String VERSION_NAME = "version"; - private static final List SPECIAL_FIELDS = List.of(KEY_NAME, VERSION_NAME); - - String propNameToFieldName(String name) { - return name; - } - - String propNameToGetterName(String name) { - return "get" + name.substring(0, 1).toUpperCase() + name.substring(1); - } - - String propNameToSetterName(String name) { - return "set" + name.substring(0, 1).toUpperCase() + name.substring(1); - } - - DotName getDataClassName(ClassInfo jData) { - return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() + "Data"); - } - - DotName getCTClassName(ClassInfo jData) { - return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() + "CTData"); - } - - DotName getImmutableClassName(ClassInfo jData) { - return DotName.createComponentized(jData.name().packagePrefixName(), jData.name().local() + "ImmutableData"); - } - - @BuildStep - void generateDataClass(List jDataItems, BuildProducer generatedClasses) { - var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); - for (var item : jDataItems) { - try (ClassCreator classCreator = ClassCreator.builder() - .className(getDataClassName(item.klass).toString()) - .interfaces(JData.class) - .interfaces(item.klass.name().toString()) - .interfaces(Serializable.class) - .classOutput(gizmoAdapter) - .build()) { - - - var fieldsMap = createFields(item, classCreator); - - for (var field : fieldsMap.values()) { - if (!SPECIAL_FIELDS.contains(field.getName())) { - try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { - setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); - setter.returnVoid(); - } - } - } - - try (var constructor = classCreator.getConstructorCreator(JObjectKey.class, long.class)) { - constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); - constructor.writeInstanceField(fieldsMap.get(KEY_NAME), constructor.getThis(), constructor.getMethodParam(0)); - constructor.writeInstanceField(fieldsMap.get(VERSION_NAME), constructor.getThis(), constructor.getMethodParam(1)); - constructor.returnVoid(); - } - - } - } - } - - private static final String MODIFIED_FIELD_NAME = "_modified"; - private static final String ON_CHANGE_METHOD_NAME = "onChange"; - - @BuildStep - void generateCTClass(List jDataItems, BuildProducer generatedClasses) { - var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); - for (var item : jDataItems) { - try (ClassCreator classCreator = ClassCreator.builder() - .className(getCTClassName(item.klass).toString()) - .interfaces(JData.class, ChangeTrackingJData.class) - .interfaces(item.klass.name().toString()) - .interfaces(Serializable.class) - .classOutput(gizmoAdapter) - .build()) { - var modified = classCreator.getFieldCreator(MODIFIED_FIELD_NAME, boolean.class); - modified.setModifiers(PRIVATE | TRANSIENT); - - try (var modifiedGetter = classCreator.getMethodCreator("isModified", boolean.class)) { - modifiedGetter.returnValue(modifiedGetter.readInstanceField(modified.getFieldDescriptor(), modifiedGetter.getThis())); - } - - try (var onChanged = 
classCreator.getMethodCreator(ON_CHANGE_METHOD_NAME, void.class)) { - onChanged.writeInstanceField(modified.getFieldDescriptor(), onChanged.getThis(), onChanged.load(true)); - onChanged.returnVoid(); - } - - try (var wrapped = classCreator.getMethodCreator("wrapped", item.klass.name().toString())) { - wrapped.returnValue(wrapped.getThis()); - } - - try (var wrapped = classCreator.getMethodCreator("wrapped", JData.class)) { - wrapped.returnValue(wrapped.getThis()); - } - - var fieldsMap = createFields(item, classCreator); - - for (var field : fieldsMap.values()) { - if (!SPECIAL_FIELDS.contains(field.getName())) { - try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { - setter.writeInstanceField(field, setter.getThis(), setter.getMethodParam(0)); - setter.invokeVirtualMethod(MethodDescriptor.ofMethod(classCreator.getClassName(), ON_CHANGE_METHOD_NAME, void.class), setter.getThis()); - setter.returnVoid(); - } - } - } - - try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString(), long.class.getName())) { - constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); - constructor.writeInstanceField(modified.getFieldDescriptor(), constructor.getThis(), constructor.load(true)); // FIXME: - for (var field : fieldsMap.values()) { - if (!Objects.equals(field.getName(), VERSION_NAME)) - constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod( - MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()), - constructor.getMethodParam(0) - )); - } - constructor.writeInstanceField(fieldsMap.get(VERSION_NAME), constructor.getThis(), constructor.getMethodParam(1)); - constructor.returnVoid(); - } - } - } - - } - - @BuildStep - void generateImmutableClass(List jDataItems, BuildProducer generatedClasses) { - var gizmoAdapter = new GeneratedClassGizmoAdaptor(generatedClasses, true); - for (var item : jDataItems) { - try (ClassCreator classCreator = ClassCreator.builder() - .className(getImmutableClassName(item.klass).toString()) - .interfaces(JData.class, ChangeTrackingJData.class) - .interfaces(item.klass.name().toString()) - .interfaces(Serializable.class) - .classOutput(gizmoAdapter) - .build()) { - - var fieldsMap = createFields(item, classCreator); - - for (var field : fieldsMap.values()) { - try (var setter = classCreator.getMethodCreator(propNameToSetterName(field.getName()), void.class, field.getType())) { - setter.throwException(UnsupportedOperationException.class, "Immutable object"); - } - } - - try (var constructor = classCreator.getConstructorCreator(item.klass.name().toString())) { - constructor.invokeSpecialMethod(MethodDescriptor.ofConstructor(Object.class), constructor.getThis()); - for (var field : fieldsMap.values()) { - constructor.writeInstanceField(field, constructor.getThis(), constructor.invokeInterfaceMethod( - MethodDescriptor.ofMethod(item.klass.name().toString(), propNameToGetterName(field.getName()), field.getType()), - constructor.getMethodParam(0) - )); - } - constructor.returnVoid(); - } - } - } - - } - - private Map createFields(JDataInfoBuildItem item, ClassCreator classCreator) { - return item.fields.values().stream().map(jDataFieldInfo -> { - var fc = classCreator.getFieldCreator(propNameToFieldName(jDataFieldInfo.name()), jDataFieldInfo.type().toString()); - - if (SPECIAL_FIELDS.contains(jDataFieldInfo.name())) { - fc.setModifiers(PRIVATE | 
FINAL); - } else { - fc.setModifiers(PRIVATE); - } - - try (var getter = classCreator.getMethodCreator(propNameToGetterName(jDataFieldInfo.name()), jDataFieldInfo.type().toString())) { - getter.returnValue(getter.readInstanceField(fc.getFieldDescriptor(), getter.getThis())); - } - return Pair.of(jDataFieldInfo, fc.getFieldDescriptor()); - }).collect(Collectors.toUnmodifiableMap(i -> i.getLeft().name(), Pair::getRight)); - } - - List collectInterfaces(ClassInfo type, ApplicationIndexBuildItem jandex) { - return Stream.concat(Stream.of(type), type.interfaceNames().stream() - .flatMap(x -> { - var ret = jandex.getIndex().getClassByName(x); - if (ret == null) { - System.out.println("Interface not found! " + x); - return Stream.empty(); - } - return Stream.of(ret); - }) - .flatMap(i -> collectInterfaces(i, jandex).stream())) - .collect(Collectors.toList()); - } - - Map collectMethods(List types) { - return types.stream() - .flatMap(x -> x.methods().stream()) - .filter(x -> x.name().startsWith("get") || x.name().startsWith("set")) - .collect(Collectors.toMap(MethodInfo::name, x -> x)); - } - - @BuildStep - void collectData(BuildProducer producer, List items, ApplicationIndexBuildItem jandex) { - for (var item : items) { - var methodNameToInfo = collectMethods(collectInterfaces(item.jData, jandex)); - - var reducableSet = new TreeSet<>(methodNameToInfo.keySet()); - - var fields = new TreeMap(); - if (reducableSet.contains(propNameToGetterName(KEY_NAME))) { - reducableSet.remove(propNameToGetterName(KEY_NAME)); - var methodInfo = methodNameToInfo.get(propNameToGetterName(KEY_NAME)); - if (!methodInfo.returnType().name().equals(DotName.createSimple(JObjectKey.class.getName()))) { - throw new RuntimeException("Key getter must return JObjectKey"); - } - fields.put(KEY_NAME, new JDataFieldInfo(KEY_NAME, methodNameToInfo.get(propNameToGetterName(KEY_NAME)).returnType().name())); - } else { -// throw new RuntimeException("Missing key getter"); - System.out.println("Missing key getter for " + item.jData); - // FIXME!: No matter what, I couldn't get JData to get indexed by jandex - fields.put(KEY_NAME, new JDataFieldInfo(KEY_NAME, DotName.createSimple(JObjectKey.class))); - fields.put(VERSION_NAME, new JDataFieldInfo(VERSION_NAME, DotName.createSimple(long.class))); - } - - // Find pairs of getters and setters - // FIXME: - while (!reducableSet.isEmpty()) { - var name = reducableSet.first(); - reducableSet.remove(name); - if (name.startsWith("get")) { - var setterName = "set" + name.substring(3); - if (reducableSet.contains(setterName)) { - reducableSet.remove(setterName); - } else { - throw new RuntimeException("Missing setter for getter: " + name); - } - - var getter = methodNameToInfo.get(name); - var setter = methodNameToInfo.get(setterName); - - if (!getter.returnType().equals(setter.parameters().getFirst().type())) { - throw new RuntimeException("Getter and setter types do not match: " + name); - } - - var variableName = name.substring(3, 4).toLowerCase() + name.substring(4); - - fields.put(variableName, new JDataFieldInfo(variableName, getter.returnType().name())); - } else { - throw new RuntimeException("Unknown method name: " + name); - } - } - producer.produce(new JDataInfoBuildItem(item.jData, Collections.unmodifiableMap(fields))); - } - } - - // Returns false branch - void matchClass(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List types, ClassTagFunction fn) { - for (var type : types) { - var eq = bytecodeCreator.instanceOf(toMatch, type.name().toString()); - var cmp = 
bytecodeCreator.ifTrue(eq); - fn.apply(type, cmp.trueBranch(), toMatch); - } - } - - interface ClassTagFunction { - void apply(ClassInfo type, BytecodeCreator branch, ResultHandle value); - } - - // Returns false branch - void matchClassTag(BytecodeCreator bytecodeCreator, ResultHandle toMatch, List types, ClassTagFunction fn) { - for (var type : types) { - var eq = bytecodeCreator.invokeVirtualMethod( - MethodDescriptor.ofMethod(Object.class, "equals", boolean.class, Object.class), - toMatch, - bytecodeCreator.loadClass(type.name().toString()) - ); - - var cmp = bytecodeCreator.ifTrue(eq); - fn.apply(type, cmp.trueBranch(), toMatch); - } - } - - @BuildStep - void makeJDataThingy(List jDataItems, - BuildProducer generatedBeans) { - var data = jDataItems.stream().collect(Collectors.toUnmodifiableMap(i -> i.klass, x -> x)); - var classes = data.keySet().stream().map(ClassInfo::asClass).toList(); - - var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedBeans); - - try (ClassCreator classCreator = ClassCreator.builder() - .className("com.usatiuk.objects.alloc.generated.ObjectAllocatorImpl") - .interfaces(ObjectAllocator.class) - .classOutput(gizmoAdapter) - .build()) { - - classCreator.addAnnotation(Singleton.class); - - var versionProvider = classCreator.getFieldCreator("versionProvider", JDataAllocVersionProvider.class); - versionProvider.addAnnotation(Inject.class); - versionProvider.setModifiers(PUBLIC); - - Function loadVersion = (block) -> block.invokeInterfaceMethod( - MethodDescriptor.ofMethod(JDataAllocVersionProvider.class, "getVersion", long.class), - block.readInstanceField(versionProvider.getFieldDescriptor(), block.getThis()) - ); - - try (MethodCreator methodCreator = classCreator.getMethodCreator("create", JData.class, Class.class, JObjectKey.class)) { - matchClassTag(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { - branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getDataClassName(type).toString(), JObjectKey.class, long.class), branch.getMethodParam(1), loadVersion.apply(branch))); - }); - methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); - } - - try (MethodCreator methodCreator = classCreator.getMethodCreator("copy", ChangeTrackingJData.class, JData.class)) { - matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { - branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getCTClassName(type).toString(), type.name().toString(), long.class.getName()), value, loadVersion.apply(branch))); - }); - methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); - } - - try (MethodCreator methodCreator = classCreator.getMethodCreator("unmodifiable", JData.class, JData.class)) { - matchClass(methodCreator, methodCreator.getMethodParam(0), classes, (type, branch, value) -> { - branch.returnValue(branch.newInstance(MethodDescriptor.ofConstructor(getImmutableClassName(type).toString(), type.name().toString()), value)); - }); - methodCreator.throwException(IllegalArgumentException.class, "Unknown type"); - } - } - - } -} diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java deleted file mode 100644 index 9db98d28..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocDevModeTest.java +++ /dev/null 
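For orientation while reading these deletions: ObjectsAllocProcessor generated, through Quarkus Gizmo, three companion classes per JData sub-interface (a plain backing ...Data class, a change-tracking ...CTData copy whose setters flip a _modified flag, and an immutable view whose setters throw), plus an ObjectAllocator bean dispatching on the requested class. A hand-written, heavily simplified analogue of that generated output (illustrative names; the real classes also carried the key and version fields):

// What the Gizmo processor emitted, sketched by hand for one interface.
interface KidView {
    String getName();
    void setName(String name);
}

class KidViewData implements KidView {            // the generated backing class
    private String name;
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}

class KidViewCTData implements KidView {          // the change-tracking copy
    private String name;
    private boolean modified;                     // flipped by every setter ("onChange")
    KidViewCTData(KidView source) { this.name = source.getName(); }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; this.modified = true; }
    boolean isModified() { return modified; }
}

class KidViewImmutable implements KidView {       // the unmodifiable view
    private final String name;
    KidViewImmutable(KidView source) { this.name = source.getName(); }
    public String getName() { return name; }
    public void setName(String name) { throw new UnsupportedOperationException("Immutable object"); }
}

class SimpleAllocator {                           // the generated type switch
    @SuppressWarnings("unchecked")
    <T> T create(Class<T> type) {
        if (type == KidView.class) return (T) new KidViewData();
        throw new IllegalArgumentException("Unknown type");
    }
}

Build-time generation kept allocation reflection-free, but at the cost of maintaining this whole extension, which is why this commit drops it in favour of plain records.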
@@ -1,25 +0,0 @@ -package com.usatiuk.objects.alloc.test; - -import org.jboss.shrinkwrap.api.ShrinkWrap; -import org.jboss.shrinkwrap.api.spec.JavaArchive; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -import io.quarkus.test.QuarkusDevModeTest; - -@Disabled -public class ObjectsAllocDevModeTest { - - // Start hot reload (DevMode) test with your extension loaded - @RegisterExtension - static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest() - .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); - - @Test - public void writeYourOwnDevModeTest() { - // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information - Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName()); - } -} diff --git a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java b/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java deleted file mode 100644 index 3c856f81..00000000 --- a/dhfs-parent/objects-alloc/deployment/src/test/java/com/usatiuk/objects/alloc/test/ObjectsAllocTest.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.usatiuk.objects.alloc.test; - -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import io.quarkus.test.QuarkusUnitTest; -import jakarta.inject.Inject; -import org.jboss.shrinkwrap.api.ShrinkWrap; -import org.jboss.shrinkwrap.api.spec.JavaArchive; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -@Disabled -public class ObjectsAllocTest { - - // Start unit test with your extension loaded - @RegisterExtension - static final QuarkusUnitTest unitTest = new QuarkusUnitTest() - .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); - - @Test - public void writeYourOwnUnitTest() { - // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information - Assertions.assertTrue(true, "Add some assertions to " + getClass().getName()); - } -} diff --git a/dhfs-parent/objects-alloc/integration-tests/pom.xml b/dhfs-parent/objects-alloc/integration-tests/pom.xml deleted file mode 100644 index 3bf24678..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/pom.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-alloc-parent - 1.0-SNAPSHOT - - objects-alloc-integration-tests - DHFS objects allocation - Integration Tests - - - true - - - - - com.usatiuk - objects-alloc - ${project.version} - - - com.usatiuk - objects-alloc-deployment - ${project.version} - - - com.usatiuk - objects-common - ${project.version} - - - com.usatiuk - objects-common-deployment - ${project.version} - - - io.quarkus - quarkus-junit5 - test - - - - - - - io.quarkus - quarkus-maven-plugin - - - - build - generate-code - generate-code-tests - - - - - - maven-failsafe-plugin - - - - integration-test - verify - - - - - - ${project.build.directory}/${project.build.finalName}-runner - - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - native-image - - - native - - - - - - maven-surefire-plugin - - ${native.surefire.skip} - - - - - - false - true - - - - diff --git 
a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java deleted file mode 100644 index 5db49c0c..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/DummyVersionProvider.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; -import jakarta.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class DummyVersionProvider implements JDataAllocVersionProvider { - - long version = 0; - - @Override - public long getVersion() { - return version; - } - - public void setVersion(long version) { - this.version = version; - } - -} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java deleted file mode 100644 index 429f5370..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataAssorted.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - -public interface TestJDataAssorted extends JData { - String getLastName(); - - void setLastName(String lastName); - - long getAge(); - - void setAge(long age); - - JObjectKey getKidKey(); - - void setKidKey(JObjectKey kid); -} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java b/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java deleted file mode 100644 index cfac67f3..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/java/com/usatiuk/objects/alloc/it/TestJDataEmpty.java +++ /dev/null @@ -1,6 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import com.usatiuk.objects.common.runtime.JData; - -public interface TestJDataEmpty extends JData { -} diff --git a/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties b/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties deleted file mode 100644 index b1645fe9..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/main/resources/application.properties +++ /dev/null @@ -1 +0,0 @@ -quarkus.package.jar.decompiler.enabled=true \ No newline at end of file diff --git a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java b/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java deleted file mode 100644 index 7c70671f..00000000 --- a/dhfs-parent/objects-alloc/integration-tests/src/test/java/com/usatiuk/objects/alloc/it/ObjectAllocIT.java +++ /dev/null @@ -1,96 +0,0 @@ -package com.usatiuk.objects.alloc.it; - -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; -import com.usatiuk.objects.common.runtime.JObjectKey; -import io.quarkus.test.junit.QuarkusTest; -import jakarta.inject.Inject; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -@QuarkusTest -public class ObjectAllocIT { - @Inject - ObjectAllocator objectAllocator; - - @Inject - DummyVersionProvider dummyVersionProvider; - - @Test - void 
testCreateVersion() { - dummyVersionProvider.setVersion(1); - var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); - Assertions.assertEquals(1, newObject.getVersion()); - } - - @Test - void testCopyVersion() { - dummyVersionProvider.setVersion(1); - var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); - newObject.setLastName("1"); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); - Assertions.assertEquals(1, newObject.getVersion()); - - dummyVersionProvider.setVersion(2); - var copyObject = objectAllocator.copy(newObject); - Assertions.assertNotNull(copyObject); - Assertions.assertFalse(copyObject.isModified()); - Assertions.assertEquals("1", copyObject.wrapped().getLastName()); - Assertions.assertEquals(2, copyObject.wrapped().getVersion()); - Assertions.assertEquals(1, newObject.getVersion()); - copyObject.wrapped().setLastName("2"); - Assertions.assertTrue(copyObject.isModified()); - Assertions.assertEquals("2", copyObject.wrapped().getLastName()); - Assertions.assertEquals("1", newObject.getLastName()); - } - - @Test - void testCreateObject() { - var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmptyKey")); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataEmptyKey", newObject.getKey().name()); - } - - - @Test - void testCopyObject() { - var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); - newObject.setLastName("1"); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); - - var copyObject = objectAllocator.copy(newObject); - Assertions.assertNotNull(copyObject); - Assertions.assertFalse(copyObject.isModified()); - Assertions.assertEquals("1", copyObject.wrapped().getLastName()); - copyObject.wrapped().setLastName("2"); - Assertions.assertTrue(copyObject.isModified()); - Assertions.assertEquals("2", copyObject.wrapped().getLastName()); - Assertions.assertEquals("1", newObject.getLastName()); - } - - @Test - void testImmutable() { - var newObject = objectAllocator.create(TestJDataAssorted.class, new JObjectKey("TestJDataAssorted")); - newObject.setLastName("1"); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataAssorted", newObject.getKey().name()); - - var copyObject = objectAllocator.unmodifiable(newObject); - Assertions.assertNotNull(copyObject); - Assertions.assertEquals("1", copyObject.getLastName()); - Assertions.assertThrows(UnsupportedOperationException.class, () -> copyObject.setLastName("2")); - } - - @Test - void testImmutable2() { - var newObject = objectAllocator.create(TestJDataEmpty.class, new JObjectKey("TestJDataEmpty")); - Assertions.assertNotNull(newObject); - Assertions.assertEquals("TestJDataEmpty", newObject.getKey().name()); - - var copyObject = objectAllocator.unmodifiable(newObject); - Assertions.assertNotNull(copyObject); - } -} diff --git a/dhfs-parent/objects-alloc/pom.xml b/dhfs-parent/objects-alloc/pom.xml deleted file mode 100644 index a6dc399f..00000000 --- a/dhfs-parent/objects-alloc/pom.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - 4.0.0 - - - com.usatiuk.dhfs - parent - 1.0-SNAPSHOT - - - com.usatiuk - objects-alloc-parent - 1.0-SNAPSHOT - pom - DHFS objects allocation - Parent - - - deployment - 
runtime - integration-tests - - - diff --git a/dhfs-parent/objects-alloc/runtime/pom.xml b/dhfs-parent/objects-alloc/runtime/pom.xml deleted file mode 100644 index 64dce149..00000000 --- a/dhfs-parent/objects-alloc/runtime/pom.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-alloc-parent - 1.0-SNAPSHOT - - objects-alloc - DHFS objects allocation - Runtime - - - - io.quarkus - quarkus-arc - - - com.usatiuk - objects-common - 1.0-SNAPSHOT - - - - - - - io.quarkus - quarkus-extension-maven-plugin - ${quarkus.platform.version} - - - compile - - extension-descriptor - - - ${project.groupId}:${project.artifactId}-deployment:${project.version} - - - - - - - maven-compiler-plugin - - - default-compile - - - - io.quarkus - quarkus-extension-processor - ${quarkus.platform.version} - - - - - - - - - diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java deleted file mode 100644 index 846ab9fa..00000000 --- a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ChangeTrackingJData.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.usatiuk.objects.alloc.runtime; - -import com.usatiuk.objects.common.runtime.JData; - -public interface ChangeTrackingJData { - T wrapped(); - - boolean isModified(); -} diff --git a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java b/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java deleted file mode 100644 index 763f1051..00000000 --- a/dhfs-parent/objects-alloc/runtime/src/main/java/com/usatiuk/objects/alloc/runtime/ObjectAllocator.java +++ /dev/null @@ -1,12 +0,0 @@ -package com.usatiuk.objects.alloc.runtime; - -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - -public interface ObjectAllocator { - T create(Class type, JObjectKey key); - - ChangeTrackingJData copy(T obj); - - T unmodifiable(T obj); -} diff --git a/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml b/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml deleted file mode 100644 index 202ecef3..00000000 --- a/dhfs-parent/objects-alloc/runtime/src/main/resources/META-INF/quarkus-extension.yaml +++ /dev/null @@ -1,9 +0,0 @@ -name: DHFS objects allocation -#description: Do something useful. -metadata: -# keywords: -# - objects-alloc -# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension -# categories: -# - "miscellaneous" -# status: "preview" diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java index f1ca4baf..76bd14f0 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java @@ -1,12 +1,12 @@ package com.usatiuk.objects.common.runtime; +import java.io.Serializable; + // TODO: This could be maybe moved to a separate module? 
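The JData hunk below strips the allocator/version machinery down to a plain Serializable interface exposing only key(); together with the record-based test data later in this patch, object "mutation" becomes copy-and-put. A minimal sketch of the resulting usage (sketch only, assembled from the Parent record and Transaction calls shown in this patch's tests, not a hunk of the patch itself):

    // Sketch: Parent is the record defined in this patch's test data;
    // toBuilder() is generated by Lombok's @Builder(toBuilder = true).
    var parent = new Parent(JObjectKey.of("Parent"), "John");
    curTx.put(parent);                                   // create
    curTx.put(parent.toBuilder().name("John2").build()); // "edit" = copy + put
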
// The base class for JObject data // Only one instance of this "exists" per key, the instance in the manager is canonical // When committing a transaction, the instance is checked against it, if it isn't the same, a race occurred. // It is immutable, its version is filled in by the allocator from the AllocVersionProvider -public interface JData { - JObjectKey getKey(); - - long getVersion(); +public interface JData extends Serializable { + JObjectKey key(); } diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java deleted file mode 100644 index 27c2b110..00000000 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JDataAllocVersionProvider.java +++ /dev/null @@ -1,5 +0,0 @@ -package com.usatiuk.objects.common.runtime; - -public interface JDataAllocVersionProvider { - long getVersion(); -} diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java index bce27fdc..1d66c9d6 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java @@ -3,4 +3,7 @@ package com.usatiuk.objects.common.runtime; import java.io.Serializable; public record JObjectKey(String name) implements Serializable { + public static JObjectKey of(String name) { + return new JObjectKey(name); + } } diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 4d97539c..07629904 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -64,17 +64,6 @@ supportlib 1.0-SNAPSHOT - - com.usatiuk - objects-alloc - 1.0-SNAPSHOT - - - com.usatiuk - objects-alloc-deployment - 1.0-SNAPSHOT - provided - com.usatiuk objects-common diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 48b18ca9..520149df 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -5,7 +5,6 @@ import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; @@ -33,8 +32,6 @@ public class JObjectManager { @Inject ObjectSerializer objectSerializer; @Inject - ObjectAllocator objectAllocator; - @Inject TransactionFactory transactionFactory; private final List _preCommitTxHooks; @@ -52,7 +49,7 @@ public class JObjectManager { public JDataWrapper(JDataVersionedWrapper referent) { super(referent); - var key = referent.data().getKey(); + var key = referent.data().key(); CLEANER.register(referent, () -> { _objects.remove(key, this); }); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 4384a229..edeab7ca 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; @@ -16,9 +15,6 @@ import java.util.Optional; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { - @Inject - ObjectAllocator objectAllocator; - private class TransactionImpl implements TransactionPrivate { @Getter(AccessLevel.PUBLIC) private final long _id; @@ -68,8 +64,8 @@ public class TransactionFactoryImpl implements TransactionFactory { public void put(JData obj) { // get(JData.class, obj.getKey(), LockingStrategy.OPTIMISTIC); - _writes.put(obj.getKey(), new TxRecord.TxObjectRecordWrite<>(obj)); - _newWrites.put(obj.getKey(), new TxRecord.TxObjectRecordWrite<>(obj)); + _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); + _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java deleted file mode 100644 index 4bb6118e..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectAllocVersionProvider.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.usatiuk.dhfs.objects.transaction; - -import com.usatiuk.objects.common.runtime.JDataAllocVersionProvider; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; - -@Singleton -public class TransactionObjectAllocVersionProvider implements JDataAllocVersionProvider { - @Inject - Transaction transaction; - - public long getVersion() { - return transaction.getId(); - } - -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 7600fc31..60a698cf 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -11,7 +11,7 @@ public class TxRecord { public record TxObjectRecordWrite(JData data) implements TxObjectRecord { @Override public JObjectKey key() { - return data.getKey(); + return data.key(); } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index d01d5cc5..60c66348 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -3,7 +3,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import 
com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; @@ -25,23 +24,19 @@ public class ObjectsTest { @Inject Transaction curTx; - @Inject - ObjectAllocator alloc; - @Test void createObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("Parent"), "John"); curTx.put(newParent); txm.commit(); } { txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); + var parent = curTx.get(Parent.class, JObjectKey.of("Parent")).orElse(null); + Assertions.assertEquals("John", parent.name()); txm.commit(); } } @@ -50,28 +45,27 @@ public class ObjectsTest { void createDeleteObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("Parent2"), "John"); curTx.put(newParent); txm.commit(); } { txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); + var parent = curTx.get(Parent.class, JObjectKey.of("Parent2")).orElse(null); + Assertions.assertEquals("John", parent.name()); txm.commit(); } { txm.begin(); - curTx.delete(new JObjectKey("Parent")); + curTx.delete(new JObjectKey("Parent2")); txm.commit(); } { txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("Parent")).orElse(null); + var parent = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); Assertions.assertNull(parent); txm.commit(); } @@ -81,22 +75,20 @@ public class ObjectsTest { void createCreateObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("Parent7"), "John"); curTx.put(newParent); txm.commit(); } { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent7")); - newParent.setLastName("John2"); + var newParent = new Parent(JObjectKey.of("Parent7"), "John2"); curTx.put(newParent); txm.commit(); } { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null); - Assertions.assertEquals("John2", parent.getLastName()); + Assertions.assertEquals("John2", parent.name()); txm.commit(); } } @@ -105,8 +97,7 @@ public class ObjectsTest { void editObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent3")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("Parent3"), "John"); curTx.put(newParent); txm.commit(); } @@ -114,23 +105,23 @@ public class ObjectsTest { { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); - parent.setLastName("John2"); + Assertions.assertEquals("John", parent.name()); + curTx.put(parent.toBuilder().name("John2").build()); txm.commit(); } { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); - Assertions.assertEquals("John2", parent.getLastName()); - parent.setLastName("John3"); + Assertions.assertEquals("John2", parent.name()); + curTx.put(parent.toBuilder().name("John3").build()); txm.commit(); } { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null); - 
Assertions.assertEquals("John3", parent.getLastName()); + Assertions.assertEquals("John3", parent.name()); txm.commit(); } } @@ -148,8 +139,7 @@ public class ObjectsTest { Log.warn("Thread 1"); txm.begin(); barrier.await(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("Parent2"), "John"); curTx.put(newParent); Log.warn("Thread 1 commit"); txm.commit(); @@ -164,8 +154,7 @@ public class ObjectsTest { Log.warn("Thread 2"); txm.begin(); barrier.await(); - var newParent = alloc.create(Parent.class, new JObjectKey("Parent2")); - newParent.setLastName("John2"); + var newParent = new Parent(JObjectKey.of("Parent2"), "John2"); curTx.put(newParent); Log.warn("Thread 2 commit"); txm.commit(); @@ -184,9 +173,9 @@ public class ObjectsTest { if (!thread1Failed.get()) { Assertions.assertTrue(thread2Failed.get()); - Assertions.assertEquals("John", got.getLastName()); + Assertions.assertEquals("John", got.name()); } else if (!thread2Failed.get()) { - Assertions.assertEquals("John2", got.getLastName()); + Assertions.assertEquals("John2", got.name()); } else { Assertions.fail("No thread succeeded"); } @@ -198,8 +187,7 @@ public class ObjectsTest { String key = "Parent4" + strategy.name(); { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey(key)); - newParent.setLastName("John3"); + var newParent = new Parent(JObjectKey.of(key), "John3"); curTx.put(newParent); txm.commit(); } @@ -216,7 +204,7 @@ public class ObjectsTest { txm.begin(); barrier.await(); var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); - parent.setLastName("John"); + curTx.put(parent.toBuilder().name("John").build()); Log.warn("Thread 1 commit"); txm.commit(); thread1Failed.set(false); @@ -231,7 +219,7 @@ public class ObjectsTest { txm.begin(); barrier.await(); var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); - parent.setLastName("John2"); + curTx.put(parent.toBuilder().name("John2").build()); Log.warn("Thread 2 commit"); txm.commit(); thread2Failed.set(false); @@ -249,9 +237,9 @@ public class ObjectsTest { if (!thread1Failed.get()) { Assertions.assertTrue(thread2Failed.get()); - Assertions.assertEquals("John", got.getLastName()); + Assertions.assertEquals("John", got.name()); } else if (!thread2Failed.get()) { - Assertions.assertEquals("John2", got.getLastName()); + Assertions.assertEquals("John2", got.name()); } else { Assertions.fail("No thread succeeded"); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 25e9a194..73de1ca4 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -2,7 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.test.junit.QuarkusTest; @@ -22,9 +21,6 @@ public class PreCommitTxHookTest { @Inject Transaction curTx; - @Inject - ObjectAllocator alloc; - @ApplicationScoped public static class DummyPreCommitTxHook implements PreCommitTxHook { } @@ -36,8 +32,8 @@ public class PreCommitTxHookTest { 
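The hunks below exercise the pre-commit hook callbacks. Judging by the ArgumentCaptor types, the hook receives the key plus record snapshots: onCreate(JObjectKey, JData), onChange(JObjectKey, JData, JData) and onDelete(JObjectKey, JData), presumably as default methods, since DummyPreCommitTxHook overrides nothing. A hedged sketch of a concrete hook under those assumed signatures (the class name and log format are illustrative, not from the patch):

    // Sketch under assumed signatures: logs Parent renames before commit.
    @ApplicationScoped
    public static class RenameLoggingHook implements PreCommitTxHook {
        @Override
        public void onChange(JObjectKey key, JData old, JData cur) {
            if (old instanceof Parent o && cur instanceof Parent n
                    && !o.name().equals(n.name()))
                Log.infov("{0} renamed: {1} -> {2}", key, o.name(), n.name());
        }
    }
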
void createObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("ParentCreate")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("ParentCreate"), "John"); + curTx.put(newParent); curTx.put(newParent); txm.commit(); } @@ -45,14 +41,14 @@ public class PreCommitTxHookTest { { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); + Assertions.assertEquals("John", parent.name()); txm.commit(); } ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture()); - Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).getLastName()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name()); Assertions.assertEquals(new JObjectKey("ParentCreate"), keyCaptor.getValue()); } @@ -60,8 +56,7 @@ public class PreCommitTxHookTest { void deleteObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("ParentDel")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("ParentDel"), "John"); curTx.put(newParent); txm.commit(); } @@ -69,7 +64,7 @@ public class PreCommitTxHookTest { { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); + Assertions.assertEquals("John", parent.name()); txm.commit(); } @@ -82,7 +77,7 @@ public class PreCommitTxHookTest { ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); Mockito.verify(spyHook, Mockito.times(1)).onDelete(keyCaptor.capture(), dataCaptor.capture()); - Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).getLastName()); + Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name()); Assertions.assertEquals(new JObjectKey("ParentDel"), keyCaptor.getValue()); } @@ -90,16 +85,14 @@ public class PreCommitTxHookTest { void editObject() { { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("ParentEdit"), "John"); curTx.put(newParent); txm.commit(); } { txm.begin(); - var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit")); - newParent.setLastName("John changed"); + var newParent = new Parent(JObjectKey.of("ParentEdit"), "John changed"); curTx.put(newParent); txm.commit(); } @@ -108,8 +101,8 @@ public class PreCommitTxHookTest { ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); - Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).getLastName()); - Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).getLastName()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name()); Assertions.assertEquals(new JObjectKey("ParentEdit"), keyCaptor.getValue()); } @@ -117,8 +110,7 @@ public class PreCommitTxHookTest { void editObjectWithGet() { { txm.begin(); - var newParent = alloc.create(Parent.class, 
new JObjectKey("ParentEdit2")); - newParent.setLastName("John"); + var newParent = new Parent(JObjectKey.of("ParentEdit2"), "John"); curTx.put(newParent); txm.commit(); } @@ -126,10 +118,8 @@ public class PreCommitTxHookTest { { txm.begin(); var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null); - Assertions.assertEquals("John", parent.getLastName()); - var newParent = alloc.create(Parent.class, new JObjectKey("ParentEdit2")); - newParent.setLastName("John changed"); - curTx.put(newParent); + Assertions.assertEquals("John", parent.name()); + curTx.put(parent.toBuilder().name("John changed").build()); txm.commit(); } @@ -137,8 +127,8 @@ public class PreCommitTxHookTest { ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); Mockito.verify(spyHook, Mockito.times(1)).onChange(keyCaptor.capture(), dataCaptorOld.capture(), dataCaptorNew.capture()); - Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).getLastName()); - Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).getLastName()); + Assertions.assertEquals("John", ((Parent) dataCaptorOld.getValue()).name()); + Assertions.assertEquals("John changed", ((Parent) dataCaptorNew.getValue()).name()); Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue()); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java index a3665e4a..99ab5fc3 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -1,9 +1,9 @@ package com.usatiuk.dhfs.objects.data; import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; +import lombok.Builder; -public interface Kid extends JData { - String getName(); - - void setName(String name); -} +@Builder(toBuilder = true) +public record Kid(JObjectKey key, String name) implements JData { +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java index 424fdf6f..8955e3d7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -2,13 +2,8 @@ package com.usatiuk.dhfs.objects.data; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; +import lombok.Builder; -public interface Parent extends JData { - String getLastName(); - - void setLastName(String lastName); - - JObjectKey getKidKey(); - - void setKidKey(JObjectKey kid); -} +@Builder(toBuilder = true) +public record Parent(JObjectKey key, String name) implements JData { +} \ No newline at end of file diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index a54dda6e..447727d4 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -17,7 +17,6 @@ autoprotomap objects utils - objects-alloc objects-common From 5e02ecbf8aed6e8292f1920237b1978f62bb05db Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 16:48:59 +0100 Subject: [PATCH 027/105] seemingly working fs with record classes --- dhfs-parent/kleppmanntree/pom.xml | 4 + .../usatiuk/kleppmanntree/AtomicClock.java | 5 - .../usatiuk/kleppmanntree/KleppmannTree.java | 452 
+++++++----------- .../kleppmanntree/StorageInterface.java | 19 +- .../com/usatiuk/kleppmanntree/TreeNode.java | 38 +- .../kleppmanntree/TreeNodeWrapper.java | 21 - .../kleppmanntree/KleppmanTreeSimpleTest.java | 16 +- .../com/usatiuk/kleppmanntree/TestNode.java | 2 +- .../kleppmanntree/TestNodeWrapper.java | 52 -- .../kleppmanntree/TestStorageInterface.java | 52 +- .../usatiuk/kleppmanntree/TestTreeNode.java | 37 ++ .../usatiuk/dhfs/objects/JObjectManager.java | 24 +- .../transaction/ReadTrackingObjectSource.java | 4 +- .../transaction/TransactionFactoryImpl.java | 16 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 21 + .../src/test/resources/application.properties | 2 + .../usatiuk/dhfs/files/objects/ChunkData.java | 27 +- .../com/usatiuk/dhfs/files/objects/File.java | 30 +- .../usatiuk/dhfs/files/objects/FsNode.java | 17 +- .../files/service/DhfsFileServiceImpl.java | 102 ++-- .../usatiuk/dhfs/objects/DeleterTxHook.java | 6 +- .../usatiuk/dhfs/objects/JDataRefcounted.java | 8 +- .../dhfs/objects/RefcounterTxHook.java | 12 +- .../jkleppmanntree/JKleppmannTreeManager.java | 130 +++-- .../JKleppmannTreeNodeWrapper.java | 61 --- .../structs/JKleppmannTreeNode.java | 53 +- .../structs/JKleppmannTreePersistentData.java | 54 ++- .../benchmarks/DhfsFileBenchmarkTest.java | 3 +- .../files/DhfsFileServiceSimpleTestImpl.java | 141 +++--- .../FileObjectPersistentStoreTest.java | 95 ---- .../persistence/ProtoSerializationTest.java | 24 - 31 files changed, 604 insertions(+), 924 deletions(-) delete mode 100644 dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java delete mode 100644 dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java create mode 100644 dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java delete mode 100644 dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java delete mode 100644 dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java diff --git a/dhfs-parent/kleppmanntree/pom.xml b/dhfs-parent/kleppmanntree/pom.xml index e3d133cd..be348e62 100644 --- a/dhfs-parent/kleppmanntree/pom.xml +++ b/dhfs-parent/kleppmanntree/pom.xml @@ -23,6 +23,10 @@ junit-jupiter-engine test + + org.apache.commons + commons-collections4 + org.apache.commons commons-lang3 diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java index 32e9b89e..f524473a 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/AtomicClock.java @@ -18,11 +18,6 @@ public class AtomicClock implements Clock, Serializable { _max = timestamp; } - // FIXME: - public void ungetTimestamp() { - --_max; - } - @Override public Long peekTimestamp() { return _max; diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index eef681ab..2e2cc27c 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -8,15 +8,16 @@ import java.util.function.Function; import 
java.util.logging.Level; import java.util.logging.Logger; -public class KleppmannTree, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT, WrapperT extends TreeNodeWrapper> { +public class KleppmannTree, PeerIdT extends Comparable, MetaT extends NodeMeta, NodeIdT> { private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName()); - private final StorageInterface _storage; + + private final StorageInterface _storage; private final PeerInterface _peers; private final Clock _clock; private final OpRecorder _opRecorder; - private HashMap _undoCtx = null; + private HashMap> _undoCtx = null; - public KleppmannTree(StorageInterface storage, + public KleppmannTree(StorageInterface storage, PeerInterface peers, Clock clock, OpRecorder opRecorder) { @@ -30,13 +31,8 @@ public class KleppmannTree, PeerIdT ex if (names.isEmpty()) return fromId; var from = _storage.getById(fromId); - from.rLock(); NodeIdT childId; - try { - childId = from.getNode().getChildren().get(names.getFirst()); - } finally { - from.rUnlock(); - } + childId = from.children().get(names.getFirst()); if (childId == null) return null; @@ -45,63 +41,52 @@ public class KleppmannTree, PeerIdT ex } public NodeIdT traverse(NodeIdT fromId, List names) { - _storage.rLock(); - try { - return traverseImpl(fromId, names.subList(1, names.size())); - } finally { - _storage.rUnlock(); - } + return traverseImpl(fromId, names.subList(1, names.size())); } public NodeIdT traverse(List names) { - _storage.rLock(); - try { - return traverseImpl(_storage.getRootId(), names); - } finally { - _storage.rUnlock(); - } + return traverseImpl(_storage.getRootId(), names); } private void undoEffect(LogEffect effect) { - _storage.assertRwLock(); if (effect.oldInfo() != null) { var node = _storage.getById(effect.childId()); var oldParent = _storage.getById(effect.oldInfo().oldParent()); var curParent = _storage.getById(effect.newParentId()); - curParent.rwLock(); - oldParent.rwLock(); - node.rwLock(); - try { - curParent.getNode().getChildren().remove(node.getNode().getMeta().getName()); - if (!node.getNode().getMeta().getClass().equals(effect.oldInfo().oldMeta().getClass())) - throw new IllegalArgumentException("Class mismatch for meta for node " + node.getNode().getId()); - node.getNode().setMeta(effect.oldInfo().oldMeta()); - node.getNode().setParent(oldParent.getNode().getId()); - oldParent.getNode().getChildren().put(node.getNode().getMeta().getName(), node.getNode().getId()); - node.notifyRmRef(curParent.getNode().getId()); - node.notifyRef(oldParent.getNode().getId()); - node.getNode().setLastEffectiveOp(effect.oldInfo().oldEffectiveMove()); - } finally { - node.rwUnlock(); - oldParent.rwUnlock(); - curParent.rwUnlock(); + { + var newCurParentChildren = new HashMap<>(curParent.children()); + newCurParentChildren.remove(node.meta().getName()); + curParent = curParent.withChildren(newCurParentChildren); + _storage.putNode(curParent); } + + if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass())) + throw new IllegalArgumentException("Class mismatch for meta for node " + node.key()); + { + var newOldParentChildren = new HashMap<>(oldParent.children()); + newOldParentChildren.put(node.meta().getName(), node.key()); + oldParent = oldParent.withChildren(newOldParentChildren); + _storage.putNode(oldParent); + } + _storage.putNode( + node.withMeta(effect.oldInfo().oldMeta()) + .withParent(effect.oldInfo().oldParent()) + .withLastEffectiveOp(effect.oldInfo().oldEffectiveMove()) + ); } else { var node = 
_storage.getById(effect.childId()); var curParent = _storage.getById(effect.newParentId()); - curParent.rwLock(); - node.rwLock(); - try { - curParent.getNode().getChildren().remove(node.getNode().getMeta().getName()); - node.freeze(); - node.getNode().setParent(null); - node.getNode().setLastEffectiveOp(null); - node.notifyRmRef(curParent.getNode().getId()); - _undoCtx.put(node.getNode().getId(), node); - } finally { - node.rwUnlock(); - curParent.rwUnlock(); + { + var newCurParentChildren = new HashMap<>(curParent.children()); + newCurParentChildren.remove(node.meta().getName()); + curParent = curParent.withChildren(newCurParentChildren); + _storage.putNode(curParent); } + _storage.putNode( + node.withParent(null) + .withLastEffectiveOp(null) + ); + _undoCtx.put(node.key(), node); } } @@ -116,7 +101,6 @@ public class KleppmannTree, PeerIdT ex } private void doAndPut(OpMove op, boolean failCreatingIfExists) { - _storage.assertRwLock(); var res = doOp(op, failCreatingIfExists); _storage.getLog().put(res.op().timestamp(), res); } @@ -160,22 +144,16 @@ public class KleppmannTree, PeerIdT ex } if (!inTrash.isEmpty()) { var trash = _storage.getById(_storage.getTrashId()); - trash.rwLock(); - try { - for (var n : inTrash) { - var node = _storage.getById(n); - node.rwLock(); - try { - if (trash.getNode().getChildren().remove(n.toString()) == null) - LOGGER.severe("Node " + node.getNode().getId() + " not found in trash but should be there"); - node.notifyRmRef(trash.getNode().getId()); - } finally { - node.rwUnlock(); - } - _storage.removeNode(n); + for (var n : inTrash) { + var node = _storage.getById(n); + { + var newTrashChildren = new HashMap<>(trash.children()); + if (newTrashChildren.remove(n.toString()) == null) + LOGGER.severe("Node " + node.key() + " not found in trash but should be there"); + trash = trash.withChildren(newTrashChildren); + _storage.putNode(trash); } - } finally { - trash.rwUnlock(); + _storage.removeNode(n); } } } else { @@ -188,29 +166,18 @@ public class KleppmannTree, PeerIdT ex } public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) { - _storage.rwLock(); - try { - var createdMove = createMove(newParent, newMeta, child); - _opRecorder.recordOp(createdMove); - applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists); - } finally { - _storage.rwUnlock(); - } + var createdMove = createMove(newParent, newMeta, child); + _opRecorder.recordOp(createdMove); + applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists); } public void applyExternalOp(PeerIdT from, OpMove op) { - _storage.rwLock(); - try { - _clock.updateTimestamp(op.timestamp().timestamp()); - applyOp(from, op, false); - } finally { - _storage.rwUnlock(); - } + _clock.updateTimestamp(op.timestamp().timestamp()); + applyOp(from, op, false); } // Returns true if the timestamp is newer than what's seen, false otherwise private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) { - _storage.assertRwLock(); TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from); if (oldRef != null && oldRef.compareTo(newTimestamp) > 0) { // FIXME? LOGGER.warning("Wrong op order: received older than known from " + from.toString()); @@ -221,31 +188,18 @@ public class KleppmannTree, PeerIdT ex } public boolean updateExternalTimestamp(PeerIdT from, TimestampT timestamp) { - _storage.rLock(); - try { - // TODO: Ideally no point in this separate locking? 
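From this point on, the refactor applies one uniform mutation pattern in place of the old lock/unlock pairs: read the immutable TreeNode, derive a modified copy via a withX() accessor, and persist it with _storage.putNode(). A condensed sketch of that read-copy-put step as it appears in undoEffect and applyEffects (parentId, meta and childId stand in for whatever values are in scope at the call site):

    // Sketch of the copy-on-write node update used throughout this refactor:
    // the children map is copied before modification because nodes are immutable.
    var parent = _storage.getById(parentId);
    var newChildren = new HashMap<>(parent.children());
    newChildren.put(meta.getName(), childId);
    _storage.putNode(parent.withChildren(newChildren));
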
- var gotExt = _storage.getPeerTimestampLog().getForPeer(from); - var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId()); - if ((gotExt != null && gotExt.compareTo(timestamp) >= 0) - && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false; - } finally { - _storage.rUnlock(); - } - _storage.rwLock(); - try { - updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack? - updateTimestampImpl(from, timestamp); - tryTrimLog(); - } finally { - _storage.rwUnlock(); - } - + // TODO: Ideally no point in this separate locking? + var gotExt = _storage.getPeerTimestampLog().getForPeer(from); + var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId()); + if ((gotExt != null && gotExt.compareTo(timestamp) >= 0) + && (gotSelf != null && gotSelf.compareTo(_clock.peekTimestamp()) >= 0)) return false; + updateTimestampImpl(_peers.getSelfId(), _clock.peekTimestamp()); // FIXME:? Kind of a hack? + updateTimestampImpl(from, timestamp); + tryTrimLog(); return true; } private void applyOp(PeerIdT from, OpMove op, boolean failCreatingIfExists) { - _storage.assertRwLock(); - if (!updateTimestampImpl(from, op.timestamp().timestamp())) return; var log = _storage.getLog(); @@ -276,7 +230,6 @@ public class KleppmannTree, PeerIdT ex if (!_undoCtx.isEmpty()) { for (var e : _undoCtx.entrySet()) { LOGGER.log(Level.FINE, "Dropping node " + e.getKey()); - e.getValue().unfreeze(); _storage.removeNode(e.getKey()); } } @@ -292,12 +245,10 @@ public class KleppmannTree, PeerIdT ex } private CombinedTimestamp getTimestamp() { - _storage.assertRwLock(); return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId()); } private OpMove createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) { - _storage.assertRwLock(); return new OpMove<>(getTimestamp(), newParent, newMeta, node); } @@ -317,91 +268,73 @@ public class KleppmannTree, PeerIdT ex return computed; } - private WrapperT getNewNode(TreeNode desired) { - _storage.assertRwLock(); + private TreeNode getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) { if (_undoCtx != null) { - var node = _undoCtx.get(desired.getId()); + var node = _undoCtx.get(key); if (node != null) { - node.rwLock(); try { - if (!node.getNode().getChildren().isEmpty()) { - LOGGER.log(Level.WARNING, "Not empty children for undone node " + desired.getId()); + if (!node.children().isEmpty()) { + LOGGER.log(Level.WARNING, "Not empty children for undone node " + key); } - node.getNode().setParent(desired.getParent()); - node.notifyRef(desired.getParent()); - node.getNode().setMeta(desired.getMeta()); - node.unfreeze(); + node = node.withParent(parent).withMeta(meta); } catch (Exception e) { - LOGGER.log(Level.SEVERE, "Error while fixing up node " + desired.getId(), e); - node.rwUnlock(); + LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e); node = null; } } if (node != null) { - _undoCtx.remove(desired.getId()); + _undoCtx.remove(key); return node; } } - return _storage.createNewNode(desired); + return _storage.createNewNode(key, parent, meta); } private void applyEffects(OpMove sourceOp, List> effects) { - _storage.assertRwLock(); for (var effect : effects) { - WrapperT oldParentNode = null; - WrapperT newParentNode; - WrapperT node; + TreeNode oldParentNode = null; + TreeNode newParentNode; + TreeNode node; newParentNode = _storage.getById(effect.newParentId()); - newParentNode.rwLock(); - try { - if (effect.oldInfo() != null) { - oldParentNode = 
_storage.getById(effect.oldInfo().oldParent()); - oldParentNode.rwLock(); - } - try { - if (oldParentNode == null) { - node = getNewNode(new TreeNode<>(effect.childId(), effect.newParentId(), effect.newMeta())); - } else { - node = _storage.getById(effect.childId()); - node.rwLock(); - } - try { - - if (oldParentNode != null) { - oldParentNode.getNode().getChildren().remove(effect.oldInfo().oldMeta().getName()); - node.notifyRmRef(effect.oldInfo().oldParent()); - } - - newParentNode.getNode().getChildren().put(effect.newMeta().getName(), effect.childId()); - if (effect.newParentId().equals(_storage.getTrashId()) && - !Objects.equals(effect.newMeta().getName(), effect.childId().toString())) - throw new IllegalArgumentException("Move to trash should have id of node as name"); - node.getNode().setParent(effect.newParentId()); - node.getNode().setMeta(effect.newMeta()); - node.getNode().setLastEffectiveOp(effect.effectiveOp()); - node.notifyRef(effect.newParentId()); - - } finally { - node.rwUnlock(); - } - } finally { - if (oldParentNode != null) - oldParentNode.rwUnlock(); - } - } finally { - newParentNode.rwUnlock(); + if (effect.oldInfo() != null) { + oldParentNode = _storage.getById(effect.oldInfo().oldParent()); } + if (oldParentNode == null) { + node = getNewNode(effect.childId(), effect.newParentId(), effect.newMeta()); + } else { + node = _storage.getById(effect.childId()); + } + if (oldParentNode != null) { + var newOldParentChildren = new HashMap<>(oldParentNode.children()); + newOldParentChildren.remove(effect.oldInfo().oldMeta().getName()); + oldParentNode = oldParentNode.withChildren(newOldParentChildren); + _storage.putNode(oldParentNode); + } + + { + var newNewParentChildren = new HashMap<>(newParentNode.children()); + newNewParentChildren.put(effect.newMeta().getName(), effect.childId()); + newParentNode = newParentNode.withChildren(newNewParentChildren); + _storage.putNode(newParentNode); + } + if (effect.newParentId().equals(_storage.getTrashId()) && + !Objects.equals(effect.newMeta().getName(), effect.childId().toString())) + throw new IllegalArgumentException("Move to trash should have id of node as name"); + _storage.putNode( + node.withParent(effect.newParentId()) + .withMeta(effect.newMeta()) + .withLastEffectiveOp(sourceOp) + ); } } private LogRecord computeEffects(OpMove op, boolean failCreatingIfExists) { - _storage.assertRwLock(); var node = _storage.getById(op.childId()); - NodeIdT oldParentId = (node != null && node.getNode().getParent() != null) ? node.getNode().getParent() : null; + NodeIdT oldParentId = (node != null && node.parent() != null) ? 
node.parent() : null; NodeIdT newParentId = op.newParentId(); - WrapperT newParent = _storage.getById(newParentId); + TreeNode newParent = _storage.getById(newParentId); if (newParent == null) { LOGGER.log(Level.SEVERE, "New parent not found " + op.newMeta().getName() + " " + op.childId()); @@ -409,34 +342,24 @@ public class KleppmannTree, PeerIdT ex } if (oldParentId == null) { - newParent.rLock(); - try { - var conflictNodeId = newParent.getNode().getChildren().get(op.newMeta().getName()); + var conflictNodeId = newParent.children().get(op.newMeta().getName()); - if (conflictNodeId != null) { - if (failCreatingIfExists) - throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId); + if (conflictNodeId != null) { + if (failCreatingIfExists) + throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId); - var conflictNode = _storage.getById(conflictNodeId); - conflictNode.rLock(); - try { - MetaT conflictNodeMeta = conflictNode.getNode().getMeta(); - String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.getNode().getId(); - String newOursName = op.newMeta().getName() + ".conflict." + op.childId(); - return new LogRecord<>(op, List.of( - new LogEffect<>(new LogEffectOld<>(conflictNode.getNode().getLastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.getNode().getLastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId), - new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId()) - )); - } finally { - conflictNode.rUnlock(); - } - } else { - return new LogRecord<>(op, List.of( - new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId()) - )); - } - } finally { - newParent.rUnlock(); + var conflictNode = _storage.getById(conflictNodeId); + MetaT conflictNodeMeta = conflictNode.meta(); + String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key(); + String newOursName = op.newMeta().getName() + ".conflict." 
+ op.childId(); + return new LogRecord<>(op, List.of( + new LogEffect<>(new LogEffectOld<>(conflictNode.lastEffectiveOp(), newParentId, conflictNodeMeta), conflictNode.lastEffectiveOp(), newParentId, (MetaT) conflictNodeMeta.withName(newConflictNodeName), conflictNodeId), + new LogEffect<>(null, op, op.newParentId(), (MetaT) op.newMeta().withName(newOursName), op.childId()) + )); + } else { + return new LogRecord<>(op, List.of( + new LogEffect<>(null, op, newParentId, op.newMeta(), op.childId()) + )); } } @@ -444,96 +367,64 @@ public class KleppmannTree, PeerIdT ex return new LogRecord<>(op, null); } - node.rLock(); - newParent.rLock(); - try { - MetaT oldMeta = node.getNode().getMeta(); - if (!oldMeta.getClass().equals(op.newMeta().getClass())) { - LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.getNode().getId()); - return new LogRecord<>(op, null); - } - var replaceNodeId = newParent.getNode().getChildren().get(op.newMeta().getName()); - if (replaceNodeId != null) { - var replaceNode = _storage.getById(replaceNodeId); - try { - replaceNode.rLock(); - var replaceNodeMeta = replaceNode.getNode().getMeta(); - return new LogRecord<>(op, List.of( - new LogEffect<>(new LogEffectOld<>(replaceNode.getNode().getLastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.getNode().getLastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId), - new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId()) - )); - } finally { - replaceNode.rUnlock(); - } - } - return new LogRecord<>(op, List.of( - new LogEffect<>(new LogEffectOld<>(node.getNode().getLastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId()) - )); - } finally { - newParent.rUnlock(); - node.rUnlock(); + MetaT oldMeta = node.meta(); + if (!oldMeta.getClass().equals(op.newMeta().getClass())) { + LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key()); + return new LogRecord<>(op, null); } + var replaceNodeId = newParent.children().get(op.newMeta().getName()); + if (replaceNodeId != null) { + var replaceNode = _storage.getById(replaceNodeId); + var replaceNodeMeta = replaceNode.meta(); + return new LogRecord<>(op, List.of( + new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId), + new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId()) + )); + } + return new LogRecord<>(op, List.of( + new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId()) + )); } private boolean isAncestor(NodeIdT child, NodeIdT parent) { var node = _storage.getById(parent); NodeIdT curParent; - while ((curParent = node.getNode().getParent()) != null) { + while ((curParent = node.parent()) != null) { if (Objects.equals(child, curParent)) return true; node = _storage.getById(curParent); } return false; } - public void walkTree(Consumer consumer) { - _storage.rLock(); - try { - ArrayDeque queue = new ArrayDeque<>(); - queue.push(_storage.getRootId()); + public void walkTree(Consumer> consumer) { + ArrayDeque queue = new ArrayDeque<>(); + queue.push(_storage.getRootId()); - while (!queue.isEmpty()) { - var id = queue.pop(); - var node = 
_storage.getById(id);
-                if (node == null) continue;
-                node.rLock();
-                try {
-                    queue.addAll(node.getNode().getChildren().values());
-                    consumer.accept(node);
-                } finally {
-                    node.rUnlock();
-                }
-            }
-        } finally {
-            _storage.rUnlock();
+        while (!queue.isEmpty()) {
+            var id = queue.pop();
+            var node = _storage.getById(id);
+            if (node == null) continue;
+            queue.addAll(node.children().values());
+            consumer.accept(node);
         }
     }
 
-    public Pair<String, NodeIdT> findParent(Function<WrapperT, Boolean> kidPredicate) {
-        _storage.rLock();
-        try {
-            ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
-            queue.push(_storage.getRootId());
+    public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
+        ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
+        queue.push(_storage.getRootId());
 
-            while (!queue.isEmpty()) {
-                var id = queue.pop();
-                var node = _storage.getById(id);
-                if (node == null) continue;
-                node.rLock();
-                try {
-                    var children = node.getNode().getChildren();
-                    for (var childEntry : children.entrySet()) {
-                        var child = _storage.getById(childEntry.getValue());
-                        if (kidPredicate.apply(child)) {
-                            return Pair.of(childEntry.getKey(), node.getNode().getId());
-                        }
-                    }
-                    queue.addAll(children.values());
-                } finally {
-                    node.rUnlock();
+        while (!queue.isEmpty()) {
+            var id = queue.pop();
+            var node = _storage.getById(id);
+            if (node == null) continue;
+            var children = node.children();
+            for (var childEntry : children.entrySet()) {
+                var child = _storage.getById(childEntry.getValue());
+                if (kidPredicate.apply(child)) {
+                    return Pair.of(childEntry.getKey(), node.key());
                 }
             }
-        } finally {
-            _storage.rUnlock();
+            queue.addAll(children.values());
         }
         return null;
     }
@@ -541,27 +432,22 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
     public void recordBoostrapFor(PeerIdT host) {
         TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();
 
-        _storage.rwLock();
-        try {
-            walkTree(node -> {
-                var op = node.getNode().getLastEffectiveOp();
-                if (node.getNode().getLastEffectiveOp() == null) return;
-                LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                result.put(node.getNode().getLastEffectiveOp().timestamp(), node.getNode().getLastEffectiveOp());
-            });
+        walkTree(node -> {
+            var op = node.lastEffectiveOp();
+            if (node.lastEffectiveOp() == null) return;
+            LOGGER.info("visited bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            result.put(node.lastEffectiveOp().timestamp(), node.lastEffectiveOp());
+        });
 
-            for (var le : _storage.getLog().getAll()) {
-                var op = le.getValue().op();
-                LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                result.put(le.getKey(), le.getValue().op());
-            }
+        for (var le : _storage.getLog().getAll()) {
+            var op = le.getValue().op();
+            LOGGER.info("bootstrap op from log for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            result.put(le.getKey(), le.getValue().op());
+        }
 
-            for (var op : result.values()) {
-                LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
-                _opRecorder.recordOpForPeer(host, op);
-            }
-        } finally {
-            _storage.rwUnlock();
+        for (var op : result.values()) {
+            LOGGER.info("Recording bootstrap op for " + host + ": " + op.timestamp().toString() + " " + op.newMeta().getName() + " " + op.childId() + "->" + op.newParentId());
+            _opRecorder.recordOpForPeer(host, op);
         }
     }
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
index 69467386..af55b35b 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/StorageInterface.java
@@ -4,32 +4,23 @@ public interface StorageInterface<
         TimestampT extends Comparable<TimestampT>,
         PeerIdT extends Comparable<PeerIdT>,
         MetaT extends NodeMeta,
-        NodeIdT,
-        WrapperT extends TreeNodeWrapper<TimestampT, PeerIdT, MetaT, NodeIdT>> {
+        NodeIdT> {
     NodeIdT getRootId();
 
     NodeIdT getTrashId();
 
     NodeIdT getNewNodeId();
 
-    WrapperT getById(NodeIdT id);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getById(NodeIdT id);
 
     // Creates a node, returned wrapper is RW-locked
-    WrapperT createNewNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> createNewNode(NodeIdT key, NodeIdT parent, MetaT meta);
+
+    void putNode(TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> node);
 
     void removeNode(NodeIdT id);
 
     LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> getLog();
 
    PeerTimestampLogInterface<TimestampT, PeerIdT> getPeerTimestampLog();
-
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void assertRwLock();
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
index 4eaad710..852c5870 100644
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
+++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java
@@ -1,32 +1,24 @@
 package com.usatiuk.kleppmanntree;
 
-import lombok.Getter;
-import lombok.Setter;
-
 import java.io.Serializable;
-import java.util.HashMap;
 import java.util.Map;
 
-@Getter
-@Setter
-public class TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> implements Serializable {
-    private final NodeIdT _id;
-    private NodeIdT _parent = null;
-    private OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> _lastEffectiveOp = null;
-    private MetaT _meta = null;
-    private Map<String, NodeIdT> _children = new HashMap<>();
+public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> extends Serializable {
+    NodeIdT key();
 
-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-    }
+    NodeIdT parent();
 
-    public TreeNode(NodeIdT id, NodeIdT parent, MetaT meta, Map<String, NodeIdT> children) {
-        _id = id;
-        _meta = meta;
-        _parent = parent;
-        _children = children;
-    }
+    OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp();
 
+    MetaT meta();
+
+    Map<String, NodeIdT> children();
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withParent(NodeIdT parent);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withLastEffectiveOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> lastEffectiveOp);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withMeta(MetaT meta);
+
+    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> withChildren(Map<String, NodeIdT> children);
 }
diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java
deleted file mode 100644
index 57869231..00000000
--- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNodeWrapper.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public interface TreeNodeWrapper<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
-    void rLock();
-
-    void rUnlock();
-
-    void rwLock();
-
-    void rwUnlock();
-
-    void freeze();
-
-    void unfreeze();
-
-    void notifyRef(NodeIdT id);
-
-    void notifyRmRef(NodeIdT id);
-
-    TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNode();
-}
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
index e95ce17a..a52ce207 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java
@@ -32,8 +32,8 @@ public class KleppmanTreeSimpleTest {
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));
 
-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
 
         var f1id = testNode1._storageInterface.getNewNodeId();
 
@@ -54,10 +54,10 @@
         testNode1._tree.move(d1id, new TestNodeMetaDir("Test2"), d2id);
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test1")));
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test1", "Test2")));
-        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test1"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
 
         testNode2._tree.move(d2id, new TestNodeMetaDir("Test1"), d1id);
-        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode2._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
         Assertions.assertEquals(d2id, testNode2._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode2._tree.traverse(List.of("Test2", "Test1")));
 
@@ -72,8 +72,8 @@
         }
 
         // Second node wins as it has smaller timestamp
-        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).getNode().getChildren().keySet());
-        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).getNode().getChildren().keySet());
+        Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet());
+        Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).children().keySet());
         Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test2")));
         Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test2", "Test1")));
         Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("Test2", "TestFile")));
@@ -81,8 +81,8 @@
 
         var f11 = testNode1._storageInterface.getById(f1id);
         var f12 = testNode2._storageInterface.getById(f1id);
-        Assertions.assertEquals(f11.getNode().getMeta(), f12.getNode().getMeta());
-        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.getNode().getMeta());
+        Assertions.assertEquals(f11.meta(), f12.meta());
+        Assertions.assertInstanceOf(TestNodeMetaFile.class, f11.meta());
 
         // Trim test
         Assertions.assertTrue(testNode1._storageInterface.getLog().size() <= 1);
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
index 005cf2b0..53f2a7c6 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNode.java
@@ -9,7 +9,7 @@ public class TestNode {
     protected final TestClock _clock;
     protected final TestPeerInterface _peerInterface;
     protected final TestStorageInterface _storageInterface;
-    protected final KleppmannTree<Long, Long, TestNodeMeta, Long, TestNodeWrapper> _tree;
+    protected final KleppmannTree<Long, Long, TestNodeMeta, Long> _tree;
     private final TestOpRecorder _recorder;
 
     public TestNode(long id) {
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java
deleted file mode 100644
index 57a4f600..00000000
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeWrapper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package com.usatiuk.kleppmanntree;
-
-public class TestNodeWrapper implements TreeNodeWrapper<Long, Long, TestNodeMeta, Long> {
-    private final TreeNode<Long, Long, TestNodeMeta, Long> _backingNode;
-
-    public TestNodeWrapper(TreeNode<Long, Long, TestNodeMeta, Long> backingNode) {_backingNode = backingNode;}
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void freeze() {
-
-    }
-
-    @Override
-    public void unfreeze() {
-
-    }
-
-    @Override
-    public void notifyRef(Long id) {
-
-    }
-
-    @Override
-    public void notifyRmRef(Long id) {
-
-    }
-
-    @Override
-    public TreeNode<Long, Long, TestNodeMeta, Long> getNode() {
-        return _backingNode;
-    }
-}
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
index 0228d9bf..415f146a 100644
--- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestStorageInterface.java
@@ -3,17 +3,17 @@ package com.usatiuk.kleppmanntree;
 import java.util.HashMap;
 import java.util.Map;
 
-public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper> {
+public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long> {
     private final long _peerId;
-    private final Map<Long, TreeNode<Long, Long, TestNodeMeta, Long>> _nodes = new HashMap<>();
+    private final Map<Long, TestTreeNode> _nodes = new HashMap<>();
     private final TestLog _log = new TestLog();
     private final TestPeerLog _peerLog = new TestPeerLog();
     private long _curId = 1;
 
     public TestStorageInterface(long peerId) {
         _peerId = peerId;
-        _nodes.put(getRootId(), new TreeNode<>(getRootId(), null, null));
-        _nodes.put(getTrashId(), new TreeNode<>(getTrashId(), null, null));
+        _nodes.put(getRootId(), new TestTreeNode(getRootId(), null, null));
+        _nodes.put(getTrashId(), new TestTreeNode(getTrashId(), null, null));
     }
 
    @Override
@@ -32,18 +32,18 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper>
-    public TestNodeWrapper createNewNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
-        if (!_nodes.containsKey(node.getId())) {
-            _nodes.put(node.getId(), node);
-            return new TestNodeWrapper(node);
-        }
-        throw new IllegalStateException("Node with id " + node.getId() + " already exists");
+    public TestTreeNode createNewNode(Long key, Long parent, TestNodeMeta meta) {
+        return new TestTreeNode(key, parent, meta);
+    }
+
+    @Override
+    public void putNode(TreeNode<Long, Long, TestNodeMeta, Long> node) {
+        _nodes.put(node.key(), (TestTreeNode) node);
     }
 
     @Override
@@ -53,7 +53,6 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper>
     public LogInterface<Long, Long, TestNodeMeta, Long> getLog() {
         return _log;
     }
@@ -64,29 +63,4 @@ public class TestStorageInterface implements StorageInterface<Long, Long, TestNodeMeta, Long, TestNodeWrapper>
     public PeerTimestampLogInterface<Long, Long> getPeerTimestampLog() {
         return _peerLog;
     }
-
-    @Override
-    public void rLock() {
-
-    }
-
-    @Override
-    public void rUnlock() {
-
-    }
-
-    @Override
-    public void rwLock() {
-
-    }
-
-    @Override
-    public void rwUnlock() {
-
-    }
-
-    @Override
-    public void assertRwLock() {
-
-    }
 }
diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java
new file mode 100644
index 00000000..7db8967b
--- /dev/null
+++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java
@@ -0,0 +1,37 @@
+package com.usatiuk.kleppmanntree;
+
+import lombok.Builder;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+@Builder(toBuilder = true)
+public record TestTreeNode(Long key, Long parent, OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp,
+                           TestNodeMeta meta,
+                           Map<String, Long> children) implements TreeNode<Long, Long, TestNodeMeta, Long> {
+
+    public TestTreeNode(Long id, Long parent, TestNodeMeta meta) {
+        this(id, parent, null, meta, Collections.emptyMap());
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withParent(Long parent) {
+        return this.toBuilder().parent(parent).build();
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withLastEffectiveOp(OpMove<Long, Long, TestNodeMeta, Long> lastEffectiveOp) {
+        return this.toBuilder().lastEffectiveOp(lastEffectiveOp).build();
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withMeta(TestNodeMeta meta) {
+        return this.toBuilder().meta(meta).build();
+    }
+
+    @Override
+    public TreeNode<Long, Long, TestNodeMeta, Long> withChildren(Map<String, Long> children) {
+        return this.toBuilder().children(children).build();
+    }
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index 520149df..05192f14 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -232,19 +232,17 @@ public class JObjectManager {
         for (var hook : _preCommitTxHooks) {
             for (var entry : drained) {
                 Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString());
-                switch (entry) {
-                    case TxRecord.TxObjectRecordWrite<?> write -> {
-                        var oldObj = getCurrent.apply(write.key());
-                        if (oldObj == null) {
-                            hook.onCreate(write.key(), write.data());
-                        } else {
-                            hook.onChange(write.key(), oldObj, write.data());
-                        }
-                    }
-                    case TxRecord.TxObjectRecordDeleted deleted -> {
-                        hook.onDelete(deleted.key(), getCurrent.apply(deleted.key()));
-                    }
-                    default -> throw new IllegalStateException("Unexpected value: " + entry);
+                var oldObj = getCurrent.apply(entry.key());
+                var curObj = tx.get(JData.class, entry.key()).orElse(null);
+
+                assert (curObj == null) == (entry instanceof TxRecord.TxObjectRecordDeleted);
+
+                if (curObj == null) {
+                    hook.onDelete(entry.key(), oldObj);
+                } else if (oldObj == null) {
+                    hook.onCreate(entry.key(), curObj);
+                } else {
+                    hook.onChange(entry.key(), oldObj, curObj);
                 }
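+                // With this uniform dispatch a hook no longer pattern-matches on
+                // TxRecord subtypes: oldObj is the pre-transaction state, curObj the
+                // state produced by this transaction, and exactly one of
+                // onDelete/onCreate/onChange fires per written key. A minimal hook
+                // (hypothetical example; callback signatures as used by this loop):
+                //   public class LoggingTxHook implements PreCommitTxHook {
+                //       public void onCreate(JObjectKey key, JData cur) { Log.trace("new " + key); }
+                //       public void onChange(JObjectKey key, JData old, JData cur) { Log.trace("upd " + key); }
+                //       public void onDelete(JObjectKey key, JData old) { Log.trace("del " + key); }
+                //   }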
                 current.put(entry.key(), entry);
             }
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java
index ff8931f9..540360dd 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java
@@ -31,7 +31,7 @@ public class ReadTrackingObjectSource implements TransactionObjectSource {
         }
 
         got.data().ifPresent(data -> {
-            if (!type.isInstance(data))
+            if (!type.isInstance(data.data()))
                 throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass());
         });
 
@@ -49,7 +49,7 @@ public class ReadTrackingObjectSource implements TransactionObjectSource {
         }
 
         got.data().ifPresent(data -> {
-            if (!type.isInstance(data))
+            if (!type.isInstance(data.data()))
                 throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass());
         });
 
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java
index edeab7ca..9109d93d 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java
@@ -4,7 +4,6 @@ import com.usatiuk.dhfs.objects.JDataVersionedWrapper;
 import com.usatiuk.objects.common.runtime.JData;
 import com.usatiuk.objects.common.runtime.JObjectKey;
 import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
 import lombok.AccessLevel;
 import lombok.Getter;
 
@@ -30,6 +29,21 @@ public class TransactionFactoryImpl implements TransactionFactory {
 
         @Override
         public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
+            switch (_writes.get(key)) {
+                case TxRecord.TxObjectRecordWrite<?> write -> {
+                    if (type.isInstance(write.data())) {
+                        return Optional.of((T) write.data());
+                    } else {
+                        throw new IllegalStateException("Type mismatch for " + key + ": expected " + type + ", got " + write.data().getClass());
+                    }
+                }
+                case TxRecord.TxObjectRecordDeleted deleted -> {
+                    return Optional.empty();
+                }
+                case null, default -> {
+                }
+            }
+
             return switch (strategy) {
                 case OPTIMISTIC -> _source.get(type, key).data().map(JDataVersionedWrapper::data);
                 case WRITE -> _source.getWriteLocked(type, key).data().map(JDataVersionedWrapper::data);
diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java
index 60c66348..d6f93676 100644
--- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java
+++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java
@@ -41,6 +41,25 @@ public class ObjectsTest {
         }
     }
 
+    @Test
+    void createGetObject() {
+        {
+            txm.begin();
+            var newParent = new Parent(JObjectKey.of("ParentCreateGet"), "John");
+            curTx.put(newParent);
+            var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null);
+            Assertions.assertEquals("John", parent.name());
+            txm.commit();
+        }
+
+        {
+            txm.begin();
+            var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null);
+            Assertions.assertEquals("John", parent.name());
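+            // Read-your-writes: this get() observes the put() from the same open
+            // transaction, because TransactionFactoryImpl.get() (above) consults the
+            // transaction's own _writes records before the versioned backing source.
+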
txm.commit(); + } + } + @Test void createDeleteObject() { { @@ -207,6 +226,7 @@ public class ObjectsTest { curTx.put(parent.toBuilder().name("John").build()); Log.warn("Thread 1 commit"); txm.commit(); + Log.warn("Thread 1 commit done"); thread1Failed.set(false); return null; } finally { @@ -222,6 +242,7 @@ public class ObjectsTest { curTx.put(parent.toBuilder().name("John2").build()); Log.warn("Thread 2 commit"); txm.commit(); + Log.warn("Thread 2 commit done"); thread2Failed.set(false); return null; } finally { diff --git a/dhfs-parent/objects/src/test/resources/application.properties b/dhfs-parent/objects/src/test/resources/application.properties index 41617308..3fab7b8f 100644 --- a/dhfs-parent/objects/src/test/resources/application.properties +++ b/dhfs-parent/objects/src/test/resources/application.properties @@ -1,3 +1,5 @@ dhfs.objects.persistence=memory quarkus.log.category."com.usatiuk".level=TRACE quarkus.log.category."com.usatiuk".min-level=TRACE +quarkus.http.test-port=0 +quarkus.http.test-ssl-port=0 diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 407e73da..59b67811 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -2,12 +2,27 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JDataRefcounted; -import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; +import lombok.Builder; -import java.io.Serializable; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedHashSet; -public interface ChunkData extends JDataRefcounted, Serializable { - ByteString getData(); +@Builder(toBuilder = true) +public record ChunkData(JObjectKey key, Collection refsFrom, boolean frozen, + ByteString data) implements JDataRefcounted { + public ChunkData(JObjectKey key, ByteString data) { + this(key, new LinkedHashSet<>(), false, data); + } - void setData(ByteString data); -} + @Override + public ChunkData withRefsFrom(Collection refs) { + return this.toBuilder().refsFrom(refs).build(); + } + + @Override + public ChunkData withFrozen(boolean frozen) { + return this.toBuilder().frozen(frozen).build(); + } +} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 2b8e2054..15f9f292 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,25 +1,27 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.objects.common.runtime.JObjectKey; +import lombok.Builder; import java.util.Collection; import java.util.NavigableMap; -public interface File extends FsNode { - NavigableMap getChunks(); - - void setChunks(NavigableMap chunks); - - boolean getSymlink(); - - void setSymlink(boolean symlink); - - long getSize(); - - void setSize(long size); +@Builder(toBuilder = true) +public record File(JObjectKey key, Collection refsFrom, boolean frozen, long mode, long cTime, long mTime, + NavigableMap chunks, boolean symlink, long size +) implements FsNode { + @Override + public File withRefsFrom(Collection refs) { + return this.toBuilder().refsFrom(refs).build(); + } @Override 
- default Collection collectRefsTo() { - return getChunks().values().stream().toList(); + public File withFrozen(boolean frozen) { + return this.toBuilder().frozen(frozen).build(); + } + + @Override + public Collection collectRefsTo() { + return chunks().values().stream().toList(); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java index 7c09f7dc..09b76015 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java @@ -1,20 +1,11 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.dhfs.objects.JDataRefcounted; -import com.usatiuk.objects.common.runtime.JData; -import java.io.Serializable; +public interface FsNode extends JDataRefcounted { + long mode(); -public interface FsNode extends JDataRefcounted, Serializable { - long getMode(); + long cTime(); - void setMode(long mode); - - long getCtime(); - - void setCtime(long ctime); - - long getMtime(); - - void setMtime(long mtime); + long mTime(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 67b51b87..5d1bfa8e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -12,7 +12,6 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDir import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import io.grpc.Status; @@ -37,8 +36,6 @@ public class DhfsFileServiceImpl implements DhfsFileService { Transaction curTx; @Inject TransactionManager jObjectTxManager; - @Inject - ObjectAllocator objectAllocator; @ConfigProperty(name = "dhfs.files.target_chunk_size") int targetChunkSize; @@ -75,9 +72,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } private ChunkData createChunk(ByteString bytes) { - var newChunk = objectAllocator.create(ChunkData.class, new JObjectKey(UUID.randomUUID().toString())); - newChunk.setData(bytes); - newChunk.setRefsFrom(List.of()); + var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes); curTx.put(newChunk); return newChunk; } @@ -108,11 +103,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (ref == null) return Optional.empty(); GetattrRes ret; if (ref instanceof File f) { - ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.getSymlink() ? GetattrType.SYMLINK : GetattrType.FILE); + ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? 
GetattrType.SYMLINK : GetattrType.FILE); } else if (ref instanceof JKleppmannTreeNode) { ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); } else { - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.getKey())); + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key())); } return Optional.of(ret); }); @@ -123,9 +118,9 @@ public class DhfsFileServiceImpl implements DhfsFileService { return jObjectTxManager.executeTx(() -> { try { var ret = getDirEntry(name); - return switch (ret.getNode().getMeta()) { + return switch (ret.meta()) { case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno()); - case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.getKey()); + case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key()); default -> Optional.empty(); }; } catch (StatusRuntimeException e) { @@ -138,8 +133,8 @@ public class DhfsFileServiceImpl implements DhfsFileService { } private void ensureDir(JKleppmannTreeNode entry) { - if (!(entry.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.getKey())); + if (!(entry.meta() instanceof JKleppmannTreeNodeMetaDirectory)) + throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key())); } @Override @@ -154,24 +149,18 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = objectAllocator.create(File.class, new JObjectKey(fuuid.toString())); - f.setMode(mode); - f.setMtime(System.currentTimeMillis()); - f.setCtime(f.getMtime()); - f.setSymlink(false); - f.setChunks(new TreeMap<>()); - f.setRefsFrom(List.of()); + File f = new File(JObjectKey.of(fuuid.toString()), new HashSet<>(), false, mode, System.currentTimeMillis(), System.currentTimeMillis(), new TreeMap<>(), false, 0); curTx.put(f); try { - getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaFile(fname, f.getKey()), getTree().getNewNodeId()); + getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); } catch (Exception e) { // fobj.getMeta().removeRef(newNodeId); throw e; } finally { // fobj.rwUnlock(); } - return Optional.of(f.getKey()); + return Optional.of(f.key()); }); } @@ -180,7 +169,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { public Pair inoToParent(JObjectKey ino) { return jObjectTxManager.executeTx(() -> { return getTree().findParent(w -> { - if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) + if (w.meta() instanceof JKleppmannTreeNodeMetaFile f) if (f.getFileIno().equals(ino)) return true; return false; @@ -199,7 +188,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { Log.debug("Creating directory " + name); - getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId()); + getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId()); }); } @@ -207,11 +196,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { public void unlink(String name) { jObjectTxManager.executeTx(() -> { var node = getDirEntryOpt(name).orElse(null); - if (node.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) { - if (!allowRecursiveDelete && !node.getNode().getChildren().isEmpty()) + if (node.meta() instanceof 
JKleppmannTreeNodeMetaDirectory f) { + if (!allowRecursiveDelete && !node.children().isEmpty()) throw new DirectoryNotEmptyException(); } - getTree().trash(node.getNode().getMeta(), node.getKey()); + getTree().trash(node.meta(), node.key()); }); } @@ -219,13 +208,13 @@ public class DhfsFileServiceImpl implements DhfsFileService { public Boolean rename(String from, String to) { return jObjectTxManager.executeTx(() -> { var node = getDirEntry(from); - JKleppmannTreeNodeMeta meta = node.getNode().getMeta(); + JKleppmannTreeNodeMeta meta = node.meta(); var toPath = Path.of(to); var toDentry = getDirEntry(toPath.getParent().toString()); ensureDir(toDentry); - getTree().move(toDentry.getKey(), meta.withName(toPath.getFileName().toString()), node.getKey()); + getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key()); return true; }); } @@ -238,8 +227,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (dent instanceof JKleppmannTreeNode) { return true; } else if (dent instanceof File f) { - f.setMode(mode); - f.setMtime(System.currentTimeMillis()); + curTx.put(f.toBuilder().mode(mode).mTime(System.currentTimeMillis()).build()); return true; } else { throw new IllegalArgumentException(uuid + " is not a file"); @@ -252,10 +240,10 @@ public class DhfsFileServiceImpl implements DhfsFileService { return jObjectTxManager.executeTx(() -> { var found = getDirEntry(name); - if (!(found.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory md)) + if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md)) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - return found.getNode().getChildren().keySet(); + return found.children().keySet(); }); } @@ -274,7 +262,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } try { - var chunksAll = file.getChunks(); + var chunksAll = file.chunks(); if (chunksAll.isEmpty()) { return Optional.of(ByteString.empty()); } @@ -334,7 +322,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { throw new StatusRuntimeException(Status.NOT_FOUND); } - return chunkRead.getData(); + return chunkRead.data(); } private int getChunkSize(JObjectKey uuid) { @@ -373,7 +361,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (writeLogging) { - Log.info("Writing to file: " + file.getKey() + " size=" + size(fileUuid) + " " + Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " " + offset + " " + data.size()); } @@ -381,7 +369,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { truncate(fileUuid, offset); // FIXME: Some kind of immutable interface? 
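+        // The File record is immutable now: the code below has to build a modified
+        // copy and re-put it into the transaction, or the update is silently lost.
+        // The pattern used later in this method:
+        //   file = file.toBuilder().chunks(newChunks).mTime(System.currentTimeMillis()).build();
+        //   curTx.put(file);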
- var chunksAll = Collections.unmodifiableNavigableMap(file.getChunks()); + var chunksAll = Collections.unmodifiableNavigableMap(file.chunks()); var first = chunksAll.floorEntry(offset); var last = chunksAll.lowerEntry(offset + data.size()); NavigableMap removedChunks = new TreeMap<>(); @@ -496,14 +484,15 @@ public class DhfsFileServiceImpl implements DhfsFileService { var thisChunk = pendingWrites.substring(cur, end); ChunkData newChunkData = createChunk(thisChunk); - newChunks.put(start, newChunkData.getKey()); + newChunks.put(start, newChunkData.key()); start += thisChunk.size(); cur = end; } } - file.setChunks(newChunks); + file = file.toBuilder().chunks(newChunks).mTime(System.currentTimeMillis()).build(); + curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -524,10 +513,10 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (length == 0) { - var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getChunks())); + var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.chunks())); - file.setChunks(new TreeMap<>()); - file.setMtime(System.currentTimeMillis()); + file = file.toBuilder().chunks(new TreeMap<>()).mTime(System.currentTimeMillis()).build(); + curTx.put(file); cleanupChunks(file, oldChunks.values()); updateFileSize(file); return true; @@ -536,7 +525,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var curSize = size(fileUuid); if (curSize == length) return true; - var chunksAll = Collections.unmodifiableNavigableMap(file.getChunks()); + var chunksAll = Collections.unmodifiableNavigableMap(file.chunks()); NavigableMap removedChunks = new TreeMap<>(); NavigableMap newChunks = new TreeMap<>(); @@ -567,9 +556,9 @@ public class DhfsFileServiceImpl implements DhfsFileService { zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); - newChunks.put(start, newChunkData.getKey()); + newChunks.put(start, newChunkData.key()); - start += newChunkData.getData().size(); + start += newChunkData.data().size(); cur = end; } } @@ -584,10 +573,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); ChunkData newChunkData = createChunk(newChunk); - newChunks.put(tail.getKey(), newChunkData.getKey()); + newChunks.put(tail.getKey(), newChunkData.key()); } - file.setChunks(newChunks); + file = file.toBuilder().chunks(newChunks).mTime(System.currentTimeMillis()).build(); + curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); return true; @@ -622,16 +612,14 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = objectAllocator.create(File.class, new JObjectKey(fuuid.toString())); - f.setSymlink(true); - f.setRefsFrom(List.of()); + File f = new File(JObjectKey.of(fuuid.toString()), new HashSet<>(), false, 0, System.currentTimeMillis(), System.currentTimeMillis(), new TreeMap<>(), true, 0); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); - f.getChunks().put(0L, newChunkData.getKey()); + f.chunks().put(0L, newChunkData.key()); updateFileSize(f); - getTree().move(parent.getKey(), new JKleppmannTreeNodeMetaFile(fname, f.getKey()), getTree().getNewNodeId()); - return f.getKey(); + getTree().move(parent.key(), new 
JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); + return f.key(); }); } @@ -643,7 +631,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { "File not found for setTimes: " + fileUuid)) ); - file.setMtime(mtimeMs); + curTx.put(file.toBuilder().cTime(atimeMs).mTime(mtimeMs).build()); return true; }); } @@ -653,14 +641,14 @@ public class DhfsFileServiceImpl implements DhfsFileService { jObjectTxManager.executeTx(() -> { long realSize = 0; - var last = file.getChunks().lastEntry(); + var last = file.chunks().lastEntry(); if (last != null) { var lastSize = getChunkSize(last.getValue()); realSize = last.getKey() + lastSize; } - if (realSize != file.getSize()) { - file.setSize(realSize); + if (realSize != file.size()) { + curTx.put(file.toBuilder().size(realSize).build()); } }); } @@ -671,7 +659,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var read = curTx.get(File.class, uuid) .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); - return read.getSize(); + return read.size(); }); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index c1ad75d3..16f50422 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; @@ -13,11 +12,8 @@ public class DeleterTxHook implements PreCommitTxHook { @Inject Transaction curTx; - @Inject - ObjectAllocator alloc; - private boolean canDelete(JDataRefcounted data) { - return !data.getFrozen() && data.getRefsFrom().isEmpty(); + return !data.frozen() && data.refsFrom().isEmpty(); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java index 848f59d2..5e120d93 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java @@ -7,13 +7,13 @@ import java.util.Collection; import java.util.List; public interface JDataRefcounted extends JData { - Collection getRefsFrom(); + Collection refsFrom(); - void setRefsFrom(Collection refs); + JDataRefcounted withRefsFrom(Collection refs); - boolean getFrozen(); + boolean frozen(); - void setFrozen(boolean frozen); + JDataRefcounted withFrozen(boolean frozen); default Collection collectRefsTo() { return List.of(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index 47c3de43..2791e2d8 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.alloc.runtime.ObjectAllocator; import com.usatiuk.objects.common.runtime.JData; import com.usatiuk.objects.common.runtime.JObjectKey; import 
jakarta.enterprise.context.ApplicationScoped;
@@ -15,9 +14,6 @@ public class RefcounterTxHook implements PreCommitTxHook {
     @Inject
     Transaction curTx;
 
-    @Inject
-    ObjectAllocator alloc;
-
     @Override
     public void onChange(JObjectKey key, JData old, JData cur) {
         if (!(cur instanceof JDataRefcounted refCur)) {
@@ -27,12 +23,12 @@ public class RefcounterTxHook implements PreCommitTxHook {
 
         for (var newRef : CollectionUtils.subtract(refCur.collectRefsTo(), refOld.collectRefsTo())) {
             var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null);
-            referenced.setRefsFrom(CollectionUtils.union(referenced.getRefsFrom(), Set.of(key)));
+            curTx.put(referenced.withRefsFrom(CollectionUtils.union(referenced.refsFrom(), Set.of(key))));
         }
 
         for (var removedRef : CollectionUtils.subtract(refOld.collectRefsTo(), refCur.collectRefsTo())) {
             var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null);
-            referenced.setRefsFrom(CollectionUtils.subtract(referenced.getRefsFrom(), Set.of(key)));
+            curTx.put(referenced.withRefsFrom(CollectionUtils.subtract(referenced.refsFrom(), Set.of(key))));
         }
     }
 
@@ -44,7 +40,7 @@ public class RefcounterTxHook implements PreCommitTxHook {
 
         for (var newRef : refCur.collectRefsTo()) {
             var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null);
-            referenced.setRefsFrom(CollectionUtils.union(referenced.getRefsFrom(), Set.of(key)));
+            curTx.put(referenced.withRefsFrom(CollectionUtils.union(referenced.refsFrom(), Set.of(key))));
         }
     }
 
@@ -57,7 +53,7 @@ public class RefcounterTxHook implements PreCommitTxHook {
 
         for (var removedRef : refCur.collectRefsTo()) {
             var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null);
-            referenced.setRefsFrom(CollectionUtils.subtract(referenced.getRefsFrom(), Set.of(key)));
+            curTx.put(referenced.withRefsFrom(CollectionUtils.subtract(referenced.refsFrom(), Set.of(key))));
         }
     }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
index d443ab3e..c3cf95ee 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
@@ -7,7 +7,6 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDir
 import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
 import com.usatiuk.dhfs.objects.transaction.Transaction;
 import com.usatiuk.kleppmanntree.*;
-import com.usatiuk.objects.alloc.runtime.ObjectAllocator;
 import com.usatiuk.objects.common.runtime.JObjectKey;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
@@ -29,21 +28,26 @@ public class JKleppmannTreeManager {
     @Inject
     TransactionManager txManager;
     @Inject
-    ObjectAllocator objectAllocator;
-    @Inject
     JKleppmannTreePeerInterface peerInterface;
 
     public JKleppmannTree getTree(JObjectKey name) {
         return txManager.executeTx(() -> {
             var data = curTx.get(JKleppmannTreePersistentData.class, name).orElse(null);
             if (data == null) {
-                data = objectAllocator.create(JKleppmannTreePersistentData.class, name);
-                data.setClock(new AtomicClock(1L));
-                data.setQueues(new HashMap<>());
-                data.setLog(new TreeMap<>());
-                data.setPeerTimestampLog(new HashMap<>());
-                data.setFrozen(true);
+                data = new JKleppmannTreePersistentData(
+                        name,
+                        List.of(),
+                        true,
+                        1L,
+                        new HashMap<>(),
+                        new HashMap<>(),
+                        new TreeMap<>()
+                );
                 curTx.put(data);
+                var rootNode = new JKleppmannTreeNode(JObjectKey.of(name.name() + "_jt_root"), null, new JKleppmannTreeNodeMetaDirectory(""));
+                curTx.put(rootNode);
+                var trashNode = new JKleppmannTreeNode(JObjectKey.of(name.name() + "_jt_trash"), null, new JKleppmannTreeNodeMetaDirectory(""));
+                curTx.put(trashNode);
             }
             return new JKleppmannTree(data);
//            opObjectRegistry.registerObject(tree);
         }
@@ -51,9 +55,9 @@
     }
 
     public class JKleppmannTree {
-        private final KleppmannTree<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey, JKleppmannTreeNodeWrapper> _tree;
+        private final KleppmannTree<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> _tree;
 
-        private final JKleppmannTreePersistentData _data;
+        private JKleppmannTreePersistentData _data;
 
         private final JKleppmannTreeStorageInterface _storageInterface;
         private final JKleppmannTreeClock _clock;
@@ -61,7 +65,7 @@
         private final JObjectKey _treeName;
 
         JKleppmannTree(JKleppmannTreePersistentData data) {
-            _treeName = data.getKey();
+            _treeName = data.key();
             _data = data;
 
             _storageInterface = new JKleppmannTreeStorageInterface();
@@ -149,7 +153,7 @@
//            _tree.recordBoostrapFor(host);
//        }
 
-        public Pair<String, JObjectKey> findParent(Function<JKleppmannTreeNodeWrapper, Boolean> predicate) {
+        public Pair<String, JObjectKey> findParent(Function<TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>, Boolean> predicate) {
             return _tree.findParent(predicate);
         }
 
@@ -273,35 +277,31 @@
         private class JKleppmannTreeClock implements Clock<Long> {
             @Override
             public Long getTimestamp() {
-                return _data.getClock().getTimestamp();
+                var res = _data.clock() + 1;
+                _data = _data.toBuilder().clock(res).build();
+                curTx.put(_data);
+                return res;
             }
 
             @Override
             public Long peekTimestamp() {
-                return _data.getClock().peekTimestamp();
+                return _data.clock();
             }
 
             @Override
             public Long updateTimestamp(Long receivedTimestamp) {
-                return _data.getClock().updateTimestamp(receivedTimestamp);
+                var old = _data.clock();
+                _data = _data.toBuilder().clock(Math.max(old, receivedTimestamp) + 1).build();
+                curTx.put(_data);
+                return old;
             }
         }
 
-        public class JKleppmannTreeStorageInterface implements StorageInterface<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey, JKleppmannTreeNodeWrapper> {
+        public class JKleppmannTreeStorageInterface implements StorageInterface<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> {
             private final LogWrapper _logWrapper = new LogWrapper();
             private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper();
 
             public JKleppmannTreeStorageInterface() {
-                if (curTx.get(JKleppmannTreeNode.class, getRootId()).isEmpty()) {
-                    var rootNode = objectAllocator.create(JKleppmannTreeNode.class, getRootId());
-                    rootNode.setNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory("")));
-                    rootNode.setRefsFrom(List.of());
-                    curTx.put(rootNode);
-                    var trashNode = objectAllocator.create(JKleppmannTreeNode.class, getTrashId());
-                    trashNode.setRefsFrom(List.of());
-                    trashNode.setNode(new TreeNode<>(getTrashId(), null, new JKleppmannTreeNodeMetaDirectory("")));
-                    curTx.put(trashNode);
-                }
             }
 
             @Override
@@ -320,19 +320,19 @@ public class JKleppmannTreeManager {
             }
 
             @Override
-            public JKleppmannTreeNodeWrapper getById(JObjectKey id) {
+            public JKleppmannTreeNode getById(JObjectKey id) {
                 var got = curTx.get(JKleppmannTreeNode.class, id);
-                if (got.isEmpty()) return null;
-                return new JKleppmannTreeNodeWrapper(got.get());
+                return got.orElse(null);
             }
 
             @Override
-            public JKleppmannTreeNodeWrapper createNewNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> node) {
-                var created = objectAllocator.create(JKleppmannTreeNode.class, node.getId());
-                created.setNode(node);
-                created.setRefsFrom(List.of());
-                curTx.put(created);
-                return new JKleppmannTreeNodeWrapper(created);
+            public JKleppmannTreeNode createNewNode(JObjectKey key, JObjectKey parent, JKleppmannTreeNodeMeta meta) {
+                return new JKleppmannTreeNode(key, parent, meta);
+            }
+
+            @Override
+            public void putNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> node) {
+                curTx.put(((JKleppmannTreeNode) node));
             }
 
             @Override
@@ -350,95 +350,87 @@
                 return _peerLogWrapper;
             }
 
-            @Override
-            public void rLock() {
-            }
-
-            @Override
-            public void rUnlock() {
-            }
-
-            @Override
-            public void rwLock() {
-            }
-
-            @Override
-            public void rwUnlock() {
-            }
-
-            @Override
-            public void assertRwLock() {
-            }
-
             private class PeerLogWrapper implements PeerTimestampLogInterface<Long, UUID> {
                 @Override
                 public Long getForPeer(UUID peerId) {
-                    return _data.getPeerTimestampLog().get(peerId);
+                    return _data.peerTimestampLog().get(peerId);
                 }
 
                 @Override
                 public void putForPeer(UUID peerId, Long timestamp) {
-                    _data.getPeerTimestampLog().put(peerId, timestamp);
+                    var newPeerTimestampLog = new HashMap<>(_data.peerTimestampLog());
+                    newPeerTimestampLog.put(peerId, timestamp);
+                    _data = _data.toBuilder().peerTimestampLog(newPeerTimestampLog).build();
+                    curTx.put(_data);
                 }
             }
 
             private class LogWrapper implements LogInterface<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> {
                 @Override
                 public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> peekOldest() {
-                    var ret = _data.getLog().firstEntry();
+                    var ret = _data.log().firstEntry();
                     if (ret == null) return null;
                     return Pair.of(ret);
                 }
 
                 @Override
                 public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> takeOldest() {
-                    var ret = _data.getLog().pollFirstEntry();
+                    var newLog = new TreeMap<>(_data.log());
+                    var ret = newLog.pollFirstEntry();
+                    _data = _data.toBuilder().log(newLog).build();
+                    curTx.put(_data);
                     if (ret == null) return null;
                     return Pair.of(ret);
                 }
 
                 @Override
                 public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> peekNewest() {
-                    var ret = _data.getLog().lastEntry();
+                    var ret = _data.log().lastEntry();
                     if (ret == null) return null;
                     return Pair.of(ret);
                 }
 
                 @Override
                 public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>>> newestSlice(CombinedTimestamp<Long, UUID> since, boolean inclusive) {
-                    return _data.getLog().tailMap(since, inclusive).entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
+                    return _data.log().tailMap(since, inclusive).entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                 }
 
                 @Override
                 public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>>> getAll() {
-                    return _data.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
+                    return _data.log().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                 }
 
                 @Override
                 public boolean isEmpty() {
-                    return _data.getLog().isEmpty();
+                    return _data.log().isEmpty();
                 }
 
                 @Override
                 public boolean containsKey(CombinedTimestamp<Long, UUID> timestamp) {
-                    return _data.getLog().containsKey(timestamp);
+                    return _data.log().containsKey(timestamp);
                 }
 
                 @Override
                 public long size() {
-                    return (long) _data.getLog().size();
+                    return (long) _data.log().size();
                 }
 
                 @Override
                 public void put(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> record) {
-                    if (_data.getLog().containsKey(timestamp))
+                    if (_data.log().containsKey(timestamp))
                         throw new IllegalStateException("Overwriting log entry?");
-                    _data.getLog().put(timestamp, record);
+                    var newLog = new TreeMap<>(_data.log());
+                    newLog.put(timestamp, record);
+                    _data = _data.toBuilder().log(newLog).build();
+                    curTx.put(_data);
                 }
 
                 @Override
                 public void replace(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> record) {
-                    _data.getLog().put(timestamp, record);
+                    var newLog = new TreeMap<>(_data.log());
+                    newLog.put(timestamp, record);
+                    _data = _data.toBuilder().log(newLog).build();
+                    curTx.put(_data);
                 }
             }
         }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java
deleted file mode 100644
index 7f060fc0..00000000
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java
+++ /dev/null
@@ -1,61 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.kleppmanntree.TreeNode;
-import com.usatiuk.kleppmanntree.TreeNodeWrapper;
-import com.usatiuk.objects.common.runtime.JObjectKey;
-
-import java.util.UUID;
-
-public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> {
-    private final JKleppmannTreeNode _backing;
-
-    public JKleppmannTreeNodeWrapper(JKleppmannTreeNode backing) {
-        assert backing != null;
-        assert backing.getNode() != null;
-        _backing = backing;
-    }
-
-    @Override
-    public void rLock() {
-    }
-
-    @Override
-    public void rUnlock() {
-    }
-
-    @Override
-    public void rwLock() {
-    }
-
-    @Override
-    public void rwUnlock() {
-    }
-
-    @Override
-    public void freeze() {
-    }
-
-    @Override
-    public void unfreeze() {
-    }
-
-    @Override
-    public void notifyRef(JObjectKey id) {
-    }
-
-    @Override
-    public void notifyRmRef(JObjectKey id) {
-    }
-
-    @Override
-    public TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> getNode() {
-        // TODO:
-        return _backing.getNode();
-//        _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-//        if (_backing.getData() == null)
-//            throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!");
-//        return _backing.getData().getNode();
-    }
-}
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
index eb8851bc..e4509a0e 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
@@ -1,27 +1,66 @@
 package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
 
 import com.usatiuk.dhfs.objects.JDataRefcounted;
+import com.usatiuk.kleppmanntree.OpMove;
 import com.usatiuk.kleppmanntree.TreeNode;
 import com.usatiuk.objects.common.runtime.JObjectKey;
+import lombok.Builder;
 
 import java.io.Serializable;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Stream;
 
 // FIXME: Ideally this is two classes?
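+// Usage sketch (assumes a Transaction curTx in scope, as in JKleppmannTreeManager;
+// newParent/newChildren are hypothetical values): every with* call below returns a
+// fresh copy that must be re-put into the transaction to take effect:
+//   var node = curTx.get(JKleppmannTreeNode.class, id).orElseThrow();
+//   curTx.put(node.withParent(newParent).withChildren(newChildren));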
-public interface JKleppmannTreeNode extends JDataRefcounted, Serializable {
-    TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> getNode();
+@Builder(toBuilder = true)
+public record JKleppmannTreeNode(JObjectKey key, Collection<JObjectKey> refsFrom, boolean frozen, JObjectKey parent,
+                                 OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> lastEffectiveOp,
+                                 JKleppmannTreeNodeMeta meta,
+                                 Map<String, JObjectKey> children) implements TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>, JDataRefcounted, Serializable {
 
-    void setNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> node);
+    public JKleppmannTreeNode(JObjectKey id, JObjectKey parent, JKleppmannTreeNodeMeta meta) {
+        this(id, Collections.emptyList(), false, parent, null, meta, Collections.emptyMap());
+    }
 
     @Override
-    default Collection<JObjectKey> collectRefsTo() {
-        return Stream.concat(getNode().getChildren().values().stream(),
-                switch (getNode().getMeta()) {
+    public JKleppmannTreeNode withParent(JObjectKey parent) {
+        return this.toBuilder().parent(parent).build();
+    }
+
+    @Override
+    public JKleppmannTreeNode withLastEffectiveOp(OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> lastEffectiveOp) {
+        return this.toBuilder().lastEffectiveOp(lastEffectiveOp).build();
+    }
+
+    @Override
+    public JKleppmannTreeNode withMeta(JKleppmannTreeNodeMeta meta) {
+        return this.toBuilder().meta(meta).build();
+    }
+
+    @Override
+    public JKleppmannTreeNode withChildren(Map<String, JObjectKey> children) {
+        return this.toBuilder().children(children).build();
+    }
+
+    @Override
+    public JKleppmannTreeNode withRefsFrom(Collection<JObjectKey> refs) {
+        return this.toBuilder().refsFrom(refs).build();
+    }
+
+    @Override
+    public JKleppmannTreeNode withFrozen(boolean frozen) {
+        return this.toBuilder().frozen(frozen).build();
+    }
+
+    @Override
+    public Collection<JObjectKey> collectRefsTo() {
+        return Stream.concat(children().values().stream(),
+                switch (meta()) {
                     case JKleppmannTreeNodeMetaDirectory dir -> Stream.of();
                     case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno());
-                    default -> throw new IllegalStateException("Unexpected value: " + getNode().getMeta());
+                    default -> throw new IllegalStateException("Unexpected value: " + meta());
                 }
         ).toList();
     }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
index 84ca109d..6f773a7d 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
@@ -6,49 +6,51 @@ import com.usatiuk.kleppmanntree.CombinedTimestamp;
 import com.usatiuk.kleppmanntree.LogRecord;
 import com.usatiuk.kleppmanntree.OpMove;
 import com.usatiuk.objects.common.runtime.JObjectKey;
+import lombok.Builder;
 
 import java.util.*;
 
-public interface JKleppmannTreePersistentData extends JDataRefcounted {
-    AtomicClock getClock();
-
-    void setClock(AtomicClock clock);
-
-    HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>>> getQueues();
-
-    void setQueues(HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>>> queues);
-
-    HashMap<UUID, Long> getPeerTimestampLog();
-
-    void setPeerTimestampLog(HashMap<UUID, Long> peerTimestampLog);
-
-    TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> getLog();
-
-    void setLog(TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> log);
-
-    default void recordOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
-        getQueues().computeIfAbsent(host, h -> new TreeMap<>());
-        getQueues().get(host).put(opMove.timestamp(), opMove);
+@Builder(toBuilder = true)
+public record JKleppmannTreePersistentData(
+        JObjectKey key, Collection<JObjectKey> refsFrom, boolean frozen,
+        long clock,
+        HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>>> queues,
+        HashMap<UUID, Long> peerTimestampLog,
+        TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey>> log
+) implements JDataRefcounted {
+    void recordOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
+        queues().computeIfAbsent(host, h -> new TreeMap<>());
+        queues().get(host).put(opMove.timestamp(), opMove);
     }
 
-    default void removeOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
-        getQueues().get(host).remove(opMove.timestamp(), opMove);
+    void removeOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
+        queues().get(host).remove(opMove.timestamp(), opMove);
     }
 
-    default void recordOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
+    void recordOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
         for (var u : hosts) {
             recordOp(u, opMove);
         }
     }
 
-    default void removeOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
+    void removeOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, JObjectKey> opMove) {
         for (var u : hosts) {
             removeOp(u, opMove);
         }
     }
 
     @Override
-    default Collection<JObjectKey> collectRefsTo() {
-        return List.of(new JObjectKey(getKey().name() + "_jt_trash"), new JObjectKey(getKey().name() + "_jt_root"));
+    public JKleppmannTreePersistentData withRefsFrom(Collection<JObjectKey> refs) {
+        return this.toBuilder().refsFrom(refs).build();
+    }
+
+    @Override
+    public JKleppmannTreePersistentData withFrozen(boolean frozen) {
+        return this.toBuilder().frozen(frozen).build();
+    }
+
+    @Override
+    public Collection<JObjectKey> collectRefsTo() {
+        return List.of(new JObjectKey(key().name() + "_jt_trash"), new JObjectKey(key().name() + "_jt_root"));
     }
 }
diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java
index 96acf3f5..d7cade2c 100644
--- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java
+++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java
@@ -3,6 +3,7 @@ package com.usatiuk.dhfs.benchmarks;
 import com.google.protobuf.UnsafeByteOperations;
 import com.usatiuk.dhfs.TempDataProfile;
 import com.usatiuk.dhfs.files.service.DhfsFileService;
+import com.usatiuk.objects.common.runtime.JObjectKey;
 import io.quarkus.test.junit.QuarkusTest;
 import io.quarkus.test.junit.TestProfile;
 import jakarta.inject.Inject;
@@ -41,7 +42,7 @@ public class DhfsFileBenchmarkTest {
     @Test
     @Disabled
     void writeMbTest() {
-        String file = dhfsFileService.create("/writeMbTest", 0777).get();
+        JObjectKey file = dhfsFileService.create("/writeMbTest", 0777).get();
         var bb = ByteBuffer.allocateDirect(1024 * 1024);
 
         Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")", () -> {
diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java
index 8bea5c7e..796f3d36 100644
--- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java
+++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java
@@ -1,23 +1,18 @@
 package com.usatiuk.dhfs.files;
 
-import com.google.protobuf.ByteString;
 import com.usatiuk.dhfs.TempDataProfile;
-import com.usatiuk.dhfs.files.objects.ChunkData;
 import com.usatiuk.dhfs.files.objects.File;
 import com.usatiuk.dhfs.files.service.DhfsFileService;
-import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
+import com.usatiuk.dhfs.objects.TransactionManager;
+import com.usatiuk.dhfs.objects.transaction.Transaction;
 import com.usatiuk.kleppmanntree.AlreadyExistsException;
+import com.usatiuk.objects.common.runtime.JObjectKey;
 import jakarta.inject.Inject;
 import
org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Map; -import java.util.Optional; -import java.util.UUID; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import static org.awaitility.Awaitility.await; @@ -50,54 +45,54 @@ public class DhfsFileServiceSimpleTestImpl { @Inject DhfsFileService fileService; @Inject - JObjectManager jObjectManager; + Transaction curTx; @Inject - JObjectTxManager jObjectTxManager; + TransactionManager jObjectTxManager; - @Test - void readTest() { - var fuuid = UUID.randomUUID(); - { - ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); - ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); - ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); - File f = new File(fuuid, 777, false); - f.getChunks().put(0L, c1.getName()); - f.getChunks().put((long) c1.getBytes().size(), c2.getName()); - f.getChunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); - - // FIXME: dhfs_files - - var c1o = new AtomicReference(); - var c2o = new AtomicReference(); - var c3o = new AtomicReference(); - var fo = new AtomicReference(); - - jObjectTxManager.executeTx(() -> { - c1o.set(jObjectManager.put(c1, Optional.of(f.getName())).getMeta().getName()); - c2o.set(jObjectManager.put(c2, Optional.of(f.getName())).getMeta().getName()); - c3o.set(jObjectManager.put(c3, Optional.of(f.getName())).getMeta().getName()); - fo.set(jObjectManager.put(f, Optional.empty()).getMeta().getName()); - }); - - var all = jObjectManager.findAll(); - Assertions.assertTrue(all.contains(c1o.get())); - Assertions.assertTrue(all.contains(c2o.get())); - Assertions.assertTrue(all.contains(c3o.get())); - Assertions.assertTrue(all.contains(fo.get())); - } - - String all = "1234567891011"; - - { - for (int start = 0; start < all.length(); start++) { - for (int end = start; end <= all.length(); end++) { - var read = fileService.read(fuuid.toString(), start, end - start); - Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); - } - } - } - } +// @Test +// void readTest() { +// var fuuid = UUID.randomUUID(); +// { +// ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); +// ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); +// ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); +// File f = new File(fuuid, 777, false); +// f.chunks().put(0L, c1.getName()); +// f.chunks().put((long) c1.getBytes().size(), c2.getName()); +// f.chunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); +// +// // FIXME: dhfs_files +// +// var c1o = new AtomicReference(); +// var c2o = new AtomicReference(); +// var c3o = new AtomicReference(); +// var fo = new AtomicReference(); +// +// jObjectTxManager.executeTx(() -> { +// c1o.set(curTx.put(c1, Optional.of(f.getName())).getMeta().getName()); +// c2o.set(curTx.put(c2, Optional.of(f.getName())).getMeta().getName()); +// c3o.set(curTx.put(c3, Optional.of(f.getName())).getMeta().getName()); +// fo.set(curTx.put(f, Optional.empty()).getMeta().getName()); +// }); +// +// var all = jObjectManager.findAll(); +// Assertions.assertTrue(all.contains(c1o.get())); +// Assertions.assertTrue(all.contains(c2o.get())); +// Assertions.assertTrue(all.contains(c3o.get())); +// Assertions.assertTrue(all.contains(fo.get())); +// } +// +// String all = "1234567891011"; +// +// { +// for (int start = 0; start < all.length(); start++) { +// for 
(int end = start; end <= all.length(); end++) { +// var read = fileService.read(fuuid.toString(), start, end - start); +// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); +// } +// } +// } +// } @Test void dontMkdirTwiceTest() { @@ -213,9 +208,12 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}); Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray()); - var oldfile = jObjectManager.get(ret2.get()).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); + + jObjectTxManager.run(() -> { + var oldfile = curTx.get(File.class, ret2.get()).orElseThrow(IllegalStateException::new); + var chunk = oldfile.chunks().get(0); + var chunkObj = curTx.get(File.class, chunk).orElseThrow(IllegalStateException::new); + }); Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent()); @@ -224,14 +222,13 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray()); - await().atMost(5, TimeUnit.SECONDS).until(() -> { - try { - return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> !m.getReferrers().contains(uuid)); - } catch (DeletedObjectAccessException ignored) { - return true; - } - }); +// await().atMost(5, TimeUnit.SECONDS).until(() -> { +// jObjectTxManager.run(() -> { +// +// return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, +// (m, d) -> !m.getReferrers().contains(uuid)); +// }); +// }); } @Test @@ -270,13 +267,13 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); - - chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - Assertions.assertTrue(m.getReferrers().contains(uuid)); - }); +// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); +// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); +// var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); +// +// chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// Assertions.assertTrue(m.getReferrers().contains(uuid)); +// }); Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2")); Assertions.assertFalse(fileService.open("/moveTest2").isPresent()); diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java deleted file mode 
100644 index 16e78c86..00000000 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java +++ /dev/null @@ -1,95 +0,0 @@ -package com.usatiuk.dhfs.persistence; - -import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; -import com.usatiuk.dhfs.objects.repository.persistence.FileObjectPersistentStore; -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; -import jakarta.inject.Inject; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.Map; -import java.util.concurrent.ThreadLocalRandom; - - -class Profiles { - public static class FileObjectPersistentStoreTestProfile extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "TRACE"); - ret.put("dhfs.fuse.enabled", "false"); - ret.put("dhfs.objects.ref_verification", "true"); - } - } -} - -@QuarkusTest -@TestProfile(Profiles.FileObjectPersistentStoreTestProfile.class) -public class FileObjectPersistentStoreTest { - @Inject - FileObjectPersistentStore fileObjectPersistentStore; - - @Test - public void writeReadFullObject() { - String name = "writeReadFullObjectSmallMeta"; - - var bytes = new byte[100000]; - ThreadLocalRandom.current().nextBytes(bytes); - - ObjectMetadataP meta = ObjectMetadataP.newBuilder().setName("verycoolname123456789").build(); - JObjectDataP data = JObjectDataP.newBuilder().setChunkData(ChunkDataP.newBuilder().setData(ByteString.copyFrom(bytes)).build()).build(); - - fileObjectPersistentStore.writeObjectDirect(name, meta, data); - var readMeta = fileObjectPersistentStore.readObjectMeta(name); - var readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertEquals(data, readData); - - var bigString = RandomStringUtils.random(100000); - - var newMeta = ObjectMetadataP.newBuilder().setName(String.valueOf(bigString)).build(); - fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectMetaDirect(name, meta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, data); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, 
readMeta); - Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectMetaDirect(name, meta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertEquals(data, readData); - } -} diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java deleted file mode 100644 index fd6f10e7..00000000 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.usatiuk.dhfs.persistence; - -import io.quarkus.test.junit.QuarkusTest; - -@QuarkusTest -public class ProtoSerializationTest { - -// @Inject -// ProtoSerializerService protoSerializerService; -// -// @Test -// void SerializeDeserializePeerDirectory() { -// var pd = new PeerDirectory(); -// pd.getPeers().add(UUID.randomUUID()); -// var ser = JObjectDataP.newBuilder().setPeerDirectory((PeerDirectoryP) protoSerializerService.serialize(pd)).build(); -// var deser = (PeerDirectory) protoSerializerService.deserialize(ser); -// Assertions.assertIterableEquals(pd.getPeers(), deser.getPeers()); -// -// var ser2 = protoSerializerService.serializeToJObjectDataP(pd); -// var deser2 = (PeerDirectory) protoSerializerService.deserialize(ser2); -// Assertions.assertIterableEquals(pd.getPeers(), deser2.getPeers()); -// } -// -} From c4ce15b196778359cabf9b247e9332ee171f4f73 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 17:01:45 +0100 Subject: [PATCH 028/105] tests fix check data instance, so it fails if it's changed --- .../usatiuk/dhfs/objects/JObjectManager.java | 22 ++++++++++++++----- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 2 ++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 05192f14..406f06e1 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -186,6 +186,7 @@ public class JObjectManager { var current = new LinkedHashMap>(); var dependenciesLocked = new LinkedHashMap>(); + Map> reads; var toUnlock = new ArrayList(); Consumer addDependency = @@ -220,8 +221,6 @@ public class JObjectManager { try { Collection> drained; while (!(drained = tx.drainNewWrites()).isEmpty()) { - var toLock = new ArrayList(); - Log.trace("Commit iteration with " + drained.size() + " records"); drained.stream() @@ -249,7 +248,8 @@ public class JObjectManager { } } - for (var read : tx.reads().entrySet()) { + reads = tx.reads(); + for (var read : reads.entrySet()) { addDependency.accept(read.getKey()); if (read.getValue() instanceof TransactionObjectLocked locked) { toUnlock.add(locked.lock); @@ -257,13 +257,23 @@ public class JObjectManager { } for (var dep : dependenciesLocked.entrySet()) { - Log.trace("Checking dependency " + dep.getKey()); - - if (dep.getValue().data().isEmpty()) continue; + if 
(dep.getValue().data().isEmpty()) { + Log.trace("Checking dependency " + dep.getKey() + " - not found"); + continue; + } if (dep.getValue().data().get().version() >= tx.getId()) { + Log.trace("Checking dependency " + dep.getKey() + " - newer than"); throw new IllegalStateException("Serialization hazard: " + dep.getValue().data().get().version() + " vs " + tx.getId()); } + + var read = reads.get(dep.getKey()); + if (read != null && read.data().orElse(null) != dep.getValue().data().orElse(null)) { + Log.trace("Checking dependency " + dep.getKey() + " - read mismatch"); + throw new IllegalStateException("Read mismatch for " + dep.getKey() + ": " + read + " vs " + dep.getValue()); + } + + Log.trace("Checking dependency " + dep.getKey() + " - ok"); } Log.tracef("Flushing transaction %d to storage", tx.getId()); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index d6f93676..2298d491 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -158,6 +158,7 @@ public class ObjectsTest { Log.warn("Thread 1"); txm.begin(); barrier.await(); + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); var newParent = new Parent(JObjectKey.of("Parent2"), "John"); curTx.put(newParent); Log.warn("Thread 1 commit"); @@ -173,6 +174,7 @@ public class ObjectsTest { Log.warn("Thread 2"); txm.begin(); barrier.await(); + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); var newParent = new Parent(JObjectKey.of("Parent2"), "John2"); curTx.put(newParent); Log.warn("Thread 2 commit"); From e0fbe80636b85b14daf23aeb023bf6ca211dc37a Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 17:05:04 +0100 Subject: [PATCH 029/105] disable parallel tests for now --- dhfs-parent/server/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index bb74c72a..2c1003b6 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -175,7 +175,7 @@ - true + false concurrent From 18a133abdc5f0f143f01168b9e7677b8476e1bdb Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 19:19:15 +0100 Subject: [PATCH 030/105] more sensible hook order --- .../usatiuk/dhfs/objects/JObjectManager.java | 53 +++++++++++-------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 406f06e1..69a556a7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -220,34 +220,41 @@ public class JObjectManager { // TODO: check deletions, inserts try { Collection> drained; - while (!(drained = tx.drainNewWrites()).isEmpty()) { - Log.trace("Commit iteration with " + drained.size() + " records"); + { + boolean somethingChanged; + do { + somethingChanged = false; + for (var hook : _preCommitTxHooks) { + drained = tx.drainNewWrites(); + Log.trace("Commit iteration with " + drained.size() + " records for hook " + hook.getClass()); - drained.stream() - .map(TxRecord.TxObjectRecord::key) - .sorted(Comparator.comparing(JObjectKey::toString)) - .forEach(addDependency); + drained.stream() + 
.map(TxRecord.TxObjectRecord::key) + .sorted(Comparator.comparing(JObjectKey::toString)) + .forEach(addDependency); - for (var hook : _preCommitTxHooks) { - for (var entry : drained) { - Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); - var oldObj = getCurrent.apply(entry.key()); - var curObj = tx.get(JData.class, entry.key()).orElse(null); - - assert (curObj == null) == (entry instanceof TxRecord.TxObjectRecordDeleted); - - if (curObj == null) { - hook.onDelete(entry.key(), oldObj); - } else if (oldObj == null) { - hook.onCreate(entry.key(), curObj); - } else { - hook.onChange(entry.key(), oldObj, curObj); + for (var entry : drained) { + somethingChanged = true; + Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); + var oldObj = getCurrent.apply(entry.key()); + switch (entry) { + case TxRecord.TxObjectRecordWrite write -> { + if (oldObj == null) { + hook.onCreate(write.key(), write.data()); + } else { + hook.onChange(write.key(), oldObj, write.data()); + } + } + case TxRecord.TxObjectRecordDeleted deleted -> { + hook.onDelete(deleted.key(), oldObj); + } + default -> throw new IllegalStateException("Unexpected value: " + entry); + } + current.put(entry.key(), entry); } - current.put(entry.key(), entry); } - } + } while (somethingChanged); } - reads = tx.reads(); for (var read : reads.entrySet()) { addDependency.accept(read.getKey()); From 57ea21a3b2e893ccefe8390bf870c4854b10ea6d Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 20:41:36 +0100 Subject: [PATCH 031/105] fix some file bugs --- .../files/service/DhfsFileServiceImpl.java | 26 ++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 5d1bfa8e..42336a25 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -365,8 +365,10 @@ public class DhfsFileServiceImpl implements DhfsFileService { + offset + " " + data.size()); } - if (size(fileUuid) < offset) + if (size(fileUuid) < offset) { truncate(fileUuid, offset); + file = curTx.get(File.class, fileUuid).orElse(null); + } // FIXME: Some kind of immutable interface? 
var chunksAll = Collections.unmodifiableNavigableMap(file.chunks()); @@ -491,7 +493,16 @@ public class DhfsFileServiceImpl implements DhfsFileService { } } - file = file.toBuilder().chunks(newChunks).mTime(System.currentTimeMillis()).build(); + NavigableMap realNewChunks = new TreeMap<>(); + for (var chunk : chunksAll.entrySet()) { + if (!removedChunks.containsKey(chunk.getKey())) { + realNewChunks.put(chunk.getKey(), chunk.getValue()); + } + } + + realNewChunks.putAll(newChunks); + + file = file.toBuilder().chunks(Collections.unmodifiableNavigableMap(realNewChunks)).mTime(System.currentTimeMillis()).build(); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -576,7 +587,16 @@ public class DhfsFileServiceImpl implements DhfsFileService { newChunks.put(tail.getKey(), newChunkData.key()); } - file = file.toBuilder().chunks(newChunks).mTime(System.currentTimeMillis()).build(); + NavigableMap realNewChunks = new TreeMap<>(); + for (var chunk : chunksAll.entrySet()) { + if (!removedChunks.containsKey(chunk.getKey())) { + realNewChunks.put(chunk.getKey(), chunk.getValue()); + } + } + + realNewChunks.putAll(newChunks); + + file = file.toBuilder().chunks(Collections.unmodifiableNavigableMap(realNewChunks)).mTime(System.currentTimeMillis()).build(); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); From f5ceb2361522ec58e7adf9318f77e7b53eb42764 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 20:50:16 +0100 Subject: [PATCH 032/105] pass file tests --- .../java/com/usatiuk/kleppmanntree/KleppmannTree.java | 8 ++++++-- .../java/com/usatiuk/dhfs/objects/JObjectManager.java | 2 +- .../dhfs/files/DhfsFileServiceSimpleTestImpl.java | 9 +++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index 2e2cc27c..e92a1fa2 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -51,7 +51,6 @@ public class KleppmannTree, PeerIdT ex private void undoEffect(LogEffect effect) { if (effect.oldInfo() != null) { var node = _storage.getById(effect.childId()); - var oldParent = _storage.getById(effect.oldInfo().oldParent()); var curParent = _storage.getById(effect.newParentId()); { var newCurParentChildren = new HashMap<>(curParent.children()); @@ -62,6 +61,9 @@ public class KleppmannTree, PeerIdT ex if (!node.meta().getClass().equals(effect.oldInfo().oldMeta().getClass())) throw new IllegalArgumentException("Class mismatch for meta for node " + node.key()); + + // Needs to be read after changing curParent, as it might be the same node + var oldParent = _storage.getById(effect.oldInfo().oldParent()); { var newOldParentChildren = new HashMap<>(oldParent.children()); newOldParentChildren.put(node.meta().getName(), node.key()); @@ -296,7 +298,6 @@ public class KleppmannTree, PeerIdT ex TreeNode newParentNode; TreeNode node; - newParentNode = _storage.getById(effect.newParentId()); if (effect.oldInfo() != null) { oldParentNode = _storage.getById(effect.oldInfo().oldParent()); } @@ -312,6 +313,9 @@ public class KleppmannTree, PeerIdT ex _storage.putNode(oldParentNode); } + // Needs to be read after changing oldParentNode, as it might be the same node + newParentNode = _storage.getById(effect.newParentId()); + { var 
newNewParentChildren = new HashMap<>(newParentNode.children()); newNewParentChildren.put(effect.newMeta().getName(), effect.childId()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 69a556a7..e005b9e1 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -75,7 +75,7 @@ public class JObjectManager { } else if (type.isInstance(ref.data())) { return (JDataVersionedWrapper) ref; } else { - throw new IllegalArgumentException("Object type mismatch: " + ref.getClass() + " vs " + type); + throw new IllegalArgumentException("Object type mismatch: " + ref.data().getClass() + " vs " + type); } } } diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java index 796f3d36..381fca46 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -1,20 +1,17 @@ package com.usatiuk.dhfs.files; import com.usatiuk.dhfs.TempDataProfile; +import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; import com.usatiuk.dhfs.files.service.DhfsFileService; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.AlreadyExistsException; -import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.awaitility.Awaitility.await; class Profiles { public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile { @@ -211,8 +208,8 @@ public class DhfsFileServiceSimpleTestImpl { jObjectTxManager.run(() -> { var oldfile = curTx.get(File.class, ret2.get()).orElseThrow(IllegalStateException::new); - var chunk = oldfile.chunks().get(0); - var chunkObj = curTx.get(File.class, chunk).orElseThrow(IllegalStateException::new); + var chunk = oldfile.chunks().get(0L); + var chunkObj = curTx.get(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); }); Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); From 23f5d60c612435e5d9bea4f1c8bda10c039d9cab Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 1 Jan 2025 21:01:39 +0100 Subject: [PATCH 033/105] some locking fixes --- .../main/java/com/usatiuk/dhfs/objects/JObjectManager.java | 6 ++++++ .../com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java | 3 ++- .../dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java | 3 ++- .../src/main/java/com/usatiuk/dhfs/utils/DataLocker.java | 4 +++- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index e005b9e1..bc357b0a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -324,5 +324,11 @@ public class JObjectManager { } public void rollback(TransactionPrivate tx) { + 
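        // Rollback must release the per-key read locks taken while the transaction ran
        // (reads that came back as TransactionObjectLocked still hold a DataLocker tag);
        // leaking them would leave those keys locked and stall the next transaction on them.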
Log.trace("Rolling back transaction " + tx.getId()); + tx.reads().forEach((key, value) -> { + if (value instanceof TransactionObjectLocked locked) { + locked.lock.close(); + } + }); } } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 42336a25..a169453e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -10,6 +10,7 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; import com.usatiuk.objects.common.runtime.JData; @@ -354,7 +355,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); // FIXME: - var file = curTx.get(File.class, fileUuid).orElse(null); + var file = curTx.get(File.class, fileUuid, LockingStrategy.WRITE).orElse(null); if (file == null) { Log.error("File not found when trying to write: " + fileUuid); return -1L; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index c3cf95ee..35a74978 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -5,6 +5,7 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.*; import com.usatiuk.objects.common.runtime.JObjectKey; @@ -32,7 +33,7 @@ public class JKleppmannTreeManager { public JKleppmannTree getTree(JObjectKey name) { return txManager.executeTx(() -> { - var data = curTx.get(JKleppmannTreePersistentData.class, name).orElse(null); + var data = curTx.get(JKleppmannTreePersistentData.class, name, LockingStrategy.WRITE).orElse(null); if (data == null) { data = new JKleppmannTreePersistentData( name, diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java index 35e882da..8a8fb89f 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java @@ -32,7 +32,9 @@ public class DataLocker { public void close() { synchronized (_tag) { _tag.released = true; - _tag.notify(); + // Notify all because when 
the object is locked again, + // it's a different lock tag + _tag.notifyAll(); _locks.remove(_key, _tag); } } From 7b71d405e1500ba2bd02b307fefb4fbe32512002 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 10:40:42 +0100 Subject: [PATCH 034/105] writeback --- .../usatiuk/objects/common/runtime/JData.java | 4 + .../usatiuk/dhfs/objects/JObjectManager.java | 37 +- .../com/usatiuk/dhfs/objects/TxBundle.java | 5 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 785 +++++++++--------- .../FileObjectPersistentStore.java | 8 +- .../MemoryObjectPersistentStore.java | 4 +- .../dhfs/objects/persistence/TxManifest.java | 7 +- .../usatiuk/dhfs/files/objects/ChunkData.java | 6 +- 8 files changed, 396 insertions(+), 460 deletions(-) diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java index 76bd14f0..b15418fe 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java +++ b/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java @@ -9,4 +9,8 @@ import java.io.Serializable; // It is immutable, its version is filled in by the allocator from the AllocVersionProvider public interface JData extends Serializable { JObjectKey key(); + + default int estimateSize() { + return 100; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index bc357b0a..7bffb427 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; -import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; @@ -12,7 +11,6 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; -import java.io.Serializable; import java.lang.ref.Cleaner; import java.lang.ref.WeakReference; import java.util.*; @@ -33,6 +31,8 @@ public class JObjectManager { ObjectSerializer objectSerializer; @Inject TransactionFactory transactionFactory; + @Inject + TxWriteback txWriteback; private final List _preCommitTxHooks; @@ -160,27 +160,6 @@ public class JObjectManager { return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); } - // FIXME: - private static class SimpleTxManifest implements Serializable, TxManifest { - private final List _written; - private final List _deleted; - - public SimpleTxManifest(List written, List deleted) { - _written = written; - _deleted = deleted; - } - - @Override - public List getWritten() { - return _written; - } - - @Override - public List getDeleted() { - return _deleted; - } - } - public void commit(TransactionPrivate tx) { Log.trace("Committing transaction " + tx.getId()); @@ -285,22 +264,19 @@ public class JObjectManager { Log.tracef("Flushing transaction %d to storage", tx.getId()); - var toDelete = new ArrayList(); - var toWrite = new ArrayList(); + var bundle = txWriteback.createBundle(); for (var action : current.entrySet()) { switch (action.getValue()) { case 
TxRecord.TxObjectRecordWrite write -> { Log.trace("Flushing object " + action.getKey()); - toWrite.add(action.getKey()); var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); - var data = objectSerializer.serialize(wrapped); - objectStorage.writeObject(action.getKey(), data); + bundle.commit(wrapped); _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); } case TxRecord.TxObjectRecordDeleted deleted -> { Log.trace("Deleting object " + action.getKey()); - toDelete.add(action.getKey()); + bundle.delete(action.getKey()); _objects.remove(action.getKey()); } default -> { @@ -310,8 +286,7 @@ public class JObjectManager { } Log.tracef("Committing transaction %d to storage", tx.getId()); - - objectStorage.commitTx(new SimpleTxManifest(toWrite, toDelete)); + txWriteback.commitBundle(bundle); } catch ( Throwable t) { Log.error("Error when committing transaction", t); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java index 1cd63774..8c35a36b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -1,11 +1,12 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.objects.common.runtime.JObjectKey; public interface TxBundle { long getId(); - void commit(JData obj); + void commit(JDataVersionedWrapper obj); - void delete(JData obj); + void delete(JObjectKey obj); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 92705e43..ab85f53e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -1,415 +1,370 @@ -//package com.usatiuk.dhfs.objects; -// -//import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; -//import com.usatiuk.dhfs.utils.VoidFn; -//import io.quarkus.logging.Log; -//import io.quarkus.runtime.ShutdownEvent; -//import io.quarkus.runtime.StartupEvent; -//import jakarta.annotation.Priority; -//import jakarta.enterprise.context.ApplicationScoped; -//import jakarta.enterprise.event.Observes; -//import jakarta.inject.Inject; -//import lombok.Getter; -//import org.apache.commons.lang3.concurrent.BasicThreadFactory; -//import org.eclipse.microprofile.config.inject.ConfigProperty; -// -//import java.util.*; -//import java.util.concurrent.ConcurrentLinkedQueue; -//import java.util.concurrent.CountDownLatch; -//import java.util.concurrent.ExecutorService; -//import java.util.concurrent.Executors; -//import java.util.concurrent.atomic.AtomicLong; -//import java.util.stream.Collectors; -//import java.util.stream.Stream; -// -//@ApplicationScoped -//public class TxWritebackImpl implements TxWriteback { -// private final LinkedList _pendingBundles = new LinkedList<>(); -// private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); -// -// private final Object _flushWaitSynchronizer = new Object(); -// private final AtomicLong _lastWrittenTx = new AtomicLong(-1); -// private final AtomicLong _counter = new AtomicLong(); -// private final AtomicLong _waitedTotal = new AtomicLong(0); -// @Inject -// ObjectPersistentStore objectPersistentStore; -// @ConfigProperty(name = "dhfs.objects.writeback.limit") -// long sizeLimit; -// private long 
currentSize = 0; -// private ExecutorService _writebackExecutor; -// private ExecutorService _commitExecutor; -// private ExecutorService _statusExecutor; -// private volatile boolean _ready = false; -// -// void init(@Observes @Priority(110) StartupEvent event) { -// { -// BasicThreadFactory factory = new BasicThreadFactory.Builder() -// .namingPattern("tx-writeback-%d") -// .build(); -// -// _writebackExecutor = Executors.newSingleThreadExecutor(factory); -// _writebackExecutor.submit(this::writeback); -// } -// -// { -// BasicThreadFactory factory = new BasicThreadFactory.Builder() -// .namingPattern("writeback-commit-%d") -// .build(); -// -// _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); -// } -// _statusExecutor = Executors.newSingleThreadExecutor(); -// _statusExecutor.submit(() -> { -// try { -// while (true) { -// Thread.sleep(1000); -// if (currentSize > 0) -// Log.info("Tx commit status: size=" -// + currentSize / 1024 / 1024 + "MB"); -// } -// } catch (InterruptedException ignored) { -// } -// }); -// _ready = true; -// } -// -// void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { -// Log.info("Waiting for all transactions to drain"); -// -// synchronized (_flushWaitSynchronizer) { -// _ready = false; -// while (currentSize > 0) { -// _flushWaitSynchronizer.wait(); -// } -// } -// -// _writebackExecutor.shutdownNow(); -// Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); -// } -// -// private void verifyReady() { -// if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); -// } -// -// private void writeback() { -// while (!Thread.interrupted()) { -// try { -// TxBundle bundle = new TxBundle(0); -// synchronized (_pendingBundles) { -// while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) -// _pendingBundles.wait(); -// -// long diff = 0; -// while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { -// var toCompress = _pendingBundles.poll(); -// diff -= toCompress.calculateTotalSize(); -// bundle.compress(toCompress); -// } -// diff += bundle.calculateTotalSize(); -// synchronized (_flushWaitSynchronizer) { -// currentSize += diff; -// } -// } -// -// var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size()); -// ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); -// -// for (var c : bundle._committed.values()) { -// _commitExecutor.execute(() -> { -// try { -// Log.trace("Writing new " + c.newMeta.getName()); -// objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData); -// } catch (Throwable t) { -// Log.error("Error writing " + c.newMeta.getName(), t); -// errors.add(t); -// } finally { -// latch.countDown(); -// } -// }); -// } -// for (var c : bundle._meta.values()) { -// _commitExecutor.execute(() -> { -// try { -// Log.trace("Writing (meta) " + c.newMeta.getName()); -// objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta); -// } catch (Throwable t) { -// Log.error("Error writing " + c.newMeta.getName(), t); -// errors.add(t); -// } finally { -// latch.countDown(); -// } -// }); -// } -// if (Log.isDebugEnabled()) -// for (var d : bundle._deleted.keySet()) -// Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests -// -// latch.await(); -// if (!errors.isEmpty()) { -// throw new RuntimeException("Errors in writeback!"); -// } -// objectPersistentStore.commitTx( -// new TxManifest( -// 
Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()), -// bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)), -// bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new)) -// )); -// Log.trace("Bundle " + bundle.getId() + " committed"); -// -// -// List> callbacks = new ArrayList<>(); -// synchronized (_notFlushedBundles) { -// _lastWrittenTx.set(bundle.getId()); -// while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { -// callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); -// } -// } -// callbacks.forEach(l -> l.forEach(VoidFn::apply)); -// -// synchronized (_flushWaitSynchronizer) { -// currentSize -= ((TxBundle) bundle).calculateTotalSize(); -// // FIXME: -// if (currentSize <= sizeLimit || !_ready) -// _flushWaitSynchronizer.notifyAll(); -// } -// } catch (InterruptedException ignored) { -// } catch (Exception e) { -// Log.error("Uncaught exception in writeback", e); -// } catch (Throwable o) { -// Log.error("Uncaught THROWABLE in writeback", o); -// } -// } -// Log.info("Writeback thread exiting"); -// } -// -// @Override -// public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() { -// verifyReady(); -// boolean wait = false; -// while (true) { -// if (wait) { -// synchronized (_flushWaitSynchronizer) { -// long started = System.currentTimeMillis(); -// while (currentSize > sizeLimit) { -// try { -// _flushWaitSynchronizer.wait(); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// long waited = System.currentTimeMillis() - started; -// _waitedTotal.addAndGet(waited); -// if (Log.isTraceEnabled()) -// Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); -// wait = false; -// } -// } -// synchronized (_pendingBundles) { -// synchronized (_flushWaitSynchronizer) { -// if (currentSize > sizeLimit) { -// if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { -// var target = _pendingBundles.poll(); -// -// long diff = -target.calculateTotalSize(); -// while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { -// var toCompress = _pendingBundles.poll(); -// diff -= toCompress.calculateTotalSize(); -// target.compress(toCompress); -// } -// diff += target.calculateTotalSize(); -// currentSize += diff; -// _pendingBundles.addFirst(target); -// } -// } -// -// if (currentSize > sizeLimit) { -// wait = true; -// continue; -// } -// } -// synchronized (_notFlushedBundles) { -// var bundle = new TxBundle(_counter.incrementAndGet()); -// _pendingBundles.addLast(bundle); -// _notFlushedBundles.put(bundle.getId(), bundle); -// return bundle; -// } -// } -// } -// } -// -// @Override -// public void commitBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { -// verifyReady(); -// synchronized (_pendingBundles) { -// ((TxBundle) bundle).setReady(); -// if (_pendingBundles.peek() == bundle) -// _pendingBundles.notify(); -// synchronized (_flushWaitSynchronizer) { -// currentSize += ((TxBundle) bundle).calculateTotalSize(); -// } -// } -// } -// -// @Override -// public void dropBundle(com.usatiuk.dhfs.objects.TxBundle bundle) { -// verifyReady(); -// synchronized (_pendingBundles) { -// Log.warn("Dropped bundle: " + bundle); -// _pendingBundles.remove((TxBundle) bundle); -// synchronized (_flushWaitSynchronizer) { -// currentSize -= ((TxBundle) 
bundle).calculateTotalSize(); -// } -// } -// } -// -// @Override -// public void fence(long bundleId) { -// var latch = new CountDownLatch(1); -// asyncFence(bundleId, latch::countDown); -// try { -// latch.await(); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// -// @Override -// public void asyncFence(long bundleId, VoidFn fn) { -// verifyReady(); -// if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); -// if (_lastWrittenTx.get() >= bundleId) { -// fn.apply(); -// return; -// } -// synchronized (_notFlushedBundles) { -// if (_lastWrittenTx.get() >= bundleId) { -// fn.apply(); -// return; -// } -// _notFlushedBundles.get(bundleId).addCallback(fn); -// } -// } -// -// @Getter -// private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest { -// private final ArrayList _written; -// private final ArrayList _deleted; -// -// private TxManifest(ArrayList written, ArrayList deleted) { -// _written = written; -// _deleted = deleted; -// } -// } -// -// private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle { -// private final HashMap, CommittedEntry> _committed = new HashMap<>(); -// private final HashMap, CommittedMeta> _meta = new HashMap<>(); -// private final HashMap, Integer> _deleted = new HashMap<>(); -// private final ArrayList _callbacks = new ArrayList<>(); -// private long _txId; -// @Getter -// private volatile boolean _ready = false; -// private long _size = -1; -// private boolean _wasCommitted = false; -// -// private TxBundle(long txId) {_txId = txId;} -// -// @Override -// public long getId() { -// return _txId; -// } -// -// public void setReady() { -// _ready = true; -// } -// -// public void addCallback(VoidFn callback) { -// synchronized (_callbacks) { -// if (_wasCommitted) throw new IllegalStateException(); -// _callbacks.add(callback); -// } -// } -// -// public List setCommitted() { -// synchronized (_callbacks) { -// _wasCommitted = true; -// return Collections.unmodifiableList(_callbacks); -// } -// } -// -// @Override -// public void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data) { -// synchronized (_committed) { -// _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize())); -// } -// } -// -// @Override -// public void commitMetaChange(JObject obj, ObjectMetadataP meta) { -// synchronized (_meta) { -// _meta.put(obj, new CommittedMeta(meta, obj.estimateSize())); -// } -// } -// -// @Override -// public void delete(JObject obj) { -// synchronized (_deleted) { -// _deleted.put(obj, obj.estimateSize()); -// } -// } -// -// -// public long calculateTotalSize() { -// if (_size >= 0) return _size; -// long out = 0; -// for (var c : _committed.values()) -// out += c.size; -// for (var c : _meta.values()) -// out += c.size; -// for (var c : _deleted.entrySet()) -// out += c.getValue(); -// _size = out; -// return _size; -// } -// -// public void compress(TxBundle other) { -// if (_txId >= other._txId) -// throw new IllegalArgumentException("Compressing an older bundle into newer"); -// -// _txId = other._txId; -// _size = -1; -// -// for (var d : other._deleted.entrySet()) { -// _committed.remove(d.getKey()); -// _meta.remove(d.getKey()); -// _deleted.put(d.getKey(), d.getValue()); -// } -// -// for (var c : other._committed.entrySet()) { -// _committed.put(c.getKey(), c.getValue()); -// _meta.remove(c.getKey()); -// _deleted.remove(c.getKey()); -// } -// -// for (var m : other._meta.entrySet()) { -// var deleted 
= _deleted.remove(m.getKey()); -// if (deleted != null) { -// _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize())); -// continue; -// } -// var committed = _committed.remove(m.getKey()); -// if (committed != null) { -// _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize())); -// continue; -// } -// _meta.put(m.getKey(), m.getValue()); -// } -// } -// -// private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {} -// -// private record CommittedMeta(ObjectMetadataP newMeta, int size) {} -// -// private record Deleted(JObject handle) {} -// } -//} +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; +import com.usatiuk.dhfs.objects.persistence.TxManifest; +import com.usatiuk.dhfs.utils.VoidFn; +import com.usatiuk.objects.common.runtime.JObjectKey; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import lombok.Getter; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; + +@ApplicationScoped +public class TxWritebackImpl implements TxWriteback { + private final LinkedList _pendingBundles = new LinkedList<>(); + private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); + + private final Object _flushWaitSynchronizer = new Object(); + private final AtomicLong _lastWrittenTx = new AtomicLong(-1); + private final AtomicLong _counter = new AtomicLong(); + private final AtomicLong _waitedTotal = new AtomicLong(0); + @Inject + ObjectPersistentStore objectPersistentStore; + @Inject + ObjectSerializer objectSerializer; + @ConfigProperty(name = "dhfs.objects.writeback.limit") + long sizeLimit; + private long currentSize = 0; + private ExecutorService _writebackExecutor; + private ExecutorService _commitExecutor; + private ExecutorService _statusExecutor; + private volatile boolean _ready = false; + + void init(@Observes @Priority(110) StartupEvent event) { + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("tx-writeback-%d") + .build(); + + _writebackExecutor = Executors.newSingleThreadExecutor(factory); + _writebackExecutor.submit(this::writeback); + } + + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("writeback-commit-%d") + .build(); + + _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); + } + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + Thread.sleep(1000); + if (currentSize > 0) + Log.info("Tx commit status: size=" + + currentSize / 1024 / 1024 + "MB"); + } + } catch (InterruptedException ignored) { + } + }); + _ready = true; + } + + void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { + Log.info("Waiting for all transactions to drain"); + + synchronized (_flushWaitSynchronizer) { + _ready = false; + while (currentSize > 0) 
{ + _flushWaitSynchronizer.wait(); + } + } + + _writebackExecutor.shutdownNow(); + Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); + } + + private void writeback() { + while (!Thread.interrupted()) { + try { + TxBundleImpl bundle = new TxBundleImpl(0); + synchronized (_pendingBundles) { + while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) + _pendingBundles.wait(); + + long diff = 0; + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + bundle.compress(toCompress); + } + diff += bundle.calculateTotalSize(); + synchronized (_flushWaitSynchronizer) { + currentSize += diff; + } + } + + var latch = new CountDownLatch((int) bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.CommittedEntry).count()); + ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); + + for (var e : bundle._entries.values()) { + switch (e) { + case TxBundleImpl.CommittedEntry c -> _commitExecutor.execute(() -> { + try { + Log.trace("Writing new " + c.key()); + objectPersistentStore.writeObject(c.key(), objectSerializer.serialize(c.data())); + } catch (Throwable t) { + Log.error("Error writing " + c.key(), t); + errors.add(t); + } finally { + latch.countDown(); + } + }); + case TxBundleImpl.DeletedEntry d -> { + if (Log.isDebugEnabled()) + Log.debug("Deleting from persistent storage " + d.key()); // FIXME: For tests + } + default -> throw new IllegalStateException("Unexpected value: " + e); + } + } + + latch.await(); + if (!errors.isEmpty()) { + throw new RuntimeException("Errors in writeback!"); + } + + objectPersistentStore.commitTx( + new TxManifest( + bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.CommittedEntry).map(TxBundleImpl.BundleEntry::key).toList(), + bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.DeletedEntry).map(TxBundleImpl.BundleEntry::key).toList() + )); + + Log.trace("Bundle " + bundle.getId() + " committed"); + + List> callbacks = new ArrayList<>(); + synchronized (_notFlushedBundles) { + _lastWrittenTx.set(bundle.getId()); + while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { + callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); + } + } + callbacks.forEach(l -> l.forEach(VoidFn::apply)); + + synchronized (_flushWaitSynchronizer) { + currentSize -= ((TxBundleImpl) bundle).calculateTotalSize(); + // FIXME: + if (currentSize <= sizeLimit || !_ready) + _flushWaitSynchronizer.notifyAll(); + } + } catch (InterruptedException ignored) { + } catch (Exception e) { + Log.error("Uncaught exception in writeback", e); + } catch (Throwable o) { + Log.error("Uncaught THROWABLE in writeback", o); + } + } + Log.info("Writeback thread exiting"); + } + + @Override + public TxBundle createBundle() { + verifyReady(); + boolean wait = false; + while (true) { + if (wait) { + synchronized (_flushWaitSynchronizer) { + long started = System.currentTimeMillis(); + while (currentSize > sizeLimit) { + try { + _flushWaitSynchronizer.wait(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + long waited = System.currentTimeMillis() - started; + _waitedTotal.addAndGet(waited); + if (Log.isTraceEnabled()) + Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle 
for " + waited + " ms"); + wait = false; + } + } + synchronized (_pendingBundles) { + synchronized (_flushWaitSynchronizer) { + if (currentSize > sizeLimit) { + if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var target = _pendingBundles.poll(); + + long diff = -target.calculateTotalSize(); + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + target.compress(toCompress); + } + diff += target.calculateTotalSize(); + currentSize += diff; + _pendingBundles.addFirst(target); + } + } + + if (currentSize > sizeLimit) { + wait = true; + continue; + } + } + synchronized (_notFlushedBundles) { + var bundle = new TxBundleImpl(_counter.incrementAndGet()); + _pendingBundles.addLast(bundle); + _notFlushedBundles.put(bundle.getId(), bundle); + return bundle; + } + } + } + } + + @Override + public void commitBundle(TxBundle bundle) { + verifyReady(); + synchronized (_pendingBundles) { + ((TxBundleImpl) bundle).setReady(); + if (_pendingBundles.peek() == bundle) + _pendingBundles.notify(); + synchronized (_flushWaitSynchronizer) { + currentSize += ((TxBundleImpl) bundle).calculateTotalSize(); + } + } + } + + @Override + public void dropBundle(TxBundle bundle) { + verifyReady(); + synchronized (_pendingBundles) { + Log.warn("Dropped bundle: " + bundle); + _pendingBundles.remove((TxBundleImpl) bundle); + synchronized (_flushWaitSynchronizer) { + currentSize -= ((TxBundleImpl) bundle).calculateTotalSize(); + } + } + } + + @Override + public void fence(long bundleId) { + var latch = new CountDownLatch(1); + asyncFence(bundleId, latch::countDown); + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void asyncFence(long bundleId, VoidFn fn) { + verifyReady(); + if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); + if (_lastWrittenTx.get() >= bundleId) { + fn.apply(); + return; + } + synchronized (_notFlushedBundles) { + if (_lastWrittenTx.get() >= bundleId) { + fn.apply(); + return; + } + _notFlushedBundles.get(bundleId).addCallback(fn); + } + } + + private class TxBundleImpl implements TxBundle { + private final LinkedHashMap _entries = new LinkedHashMap<>(); + private final ArrayList _callbacks = new ArrayList<>(); + private long _txId; + @Getter + private volatile boolean _ready = false; + private long _size = -1; + private boolean _wasCommitted = false; + + private TxBundleImpl(long txId) { + _txId = txId; + } + + @Override + public long getId() { + return _txId; + } + + public void setReady() { + _ready = true; + } + + public void addCallback(VoidFn callback) { + synchronized (_callbacks) { + if (_wasCommitted) throw new IllegalStateException(); + _callbacks.add(callback); + } + } + + public List setCommitted() { + synchronized (_callbacks) { + _wasCommitted = true; + return Collections.unmodifiableList(_callbacks); + } + } + + @Override + public void commit(JDataVersionedWrapper obj) { + synchronized (_entries) { + _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize())); + } + } + + @Override + public void delete(JObjectKey obj) { + synchronized (_entries) { + _entries.put(obj, new DeletedEntry(obj)); + } + } + + public long calculateTotalSize() { + if (_size >= 0) return _size; + _size = _entries.values().stream().mapToInt(BundleEntry::size).sum(); + return _size; + } + + public void compress(TxBundleImpl other) { + if (_txId >= 
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java
index 4c37c14f..e4956ef9 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java
@@ -244,17 +244,17 @@ public class FileObjectPersistentStore implements ObjectPersistentStore {
 
     public void commitTxImpl(TxManifest manifest, boolean failIfNotFound) {
         try {
-            if (manifest.getDeleted().isEmpty() && manifest.getWritten().isEmpty()) {
+            if (manifest.deleted().isEmpty() && manifest.written().isEmpty()) {
                 Log.debug("Empty manifest, skipping");
                 return;
             }
 
             putTxManifest(manifest);
 
-            var latch = new CountDownLatch(manifest.getWritten().size() + manifest.getDeleted().size());
+            var latch = new CountDownLatch(manifest.written().size() + manifest.deleted().size());
             ConcurrentLinkedQueue<Throwable> errors = new ConcurrentLinkedQueue<>();
 
-            for (var n : manifest.getWritten()) {
+            for (var n : manifest.written()) {
                 _flushExecutor.execute(() -> {
                     try {
                         Files.move(getTmpObjPath(n), getObjPath(n), ATOMIC_MOVE, REPLACE_EXISTING);
@@ -267,7 +267,7 @@ public class FileObjectPersistentStore implements ObjectPersistentStore {
                     }
                 });
             }
-            for (var d : manifest.getDeleted()) {
+            for (var d : manifest.deleted()) {
                 _flushExecutor.execute(() -> {
                     try {
                         deleteImpl(getObjPath(d));
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java
index 4d07aae2..d5c686b6 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java
@@ -43,10 +43,10 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
     @Override
     public void commitTx(TxManifest names) {
         synchronized (this) {
-            for (JObjectKey key : names.getWritten()) {
+            for (JObjectKey key : names.written()) {
                 _objects.put(key, _pending.get(key));
             }
-            for (JObjectKey key : names.getDeleted()) {
+            for (JObjectKey key : names.deleted()) {
                 _objects.remove(key);
             }
         }
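The manifest is what makes this two-phase commit crash-safe: objects land at temporary paths first, the manifest is persisted, and only then are the moves and deletes applied, so an interrupted commit can be healed by replaying the manifest. A sketch of that replay idea (the entry point and its wiring are assumptions; the path helpers are the ones used above):

    private void replayManifest(TxManifest manifest) throws IOException {
        for (var key : manifest.written()) {
            var tmp = getTmpObjPath(key);
            if (Files.exists(tmp)) // already moved before the crash if missing
                Files.move(tmp, getObjPath(key), ATOMIC_MOVE, REPLACE_EXISTING);
        }
        for (var key : manifest.deleted()) {
            Files.deleteIfExists(getObjPath(key));
        }
    }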
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java
index 3244b4cd..5a88d772 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java
@@ -3,11 +3,8 @@ package com.usatiuk.dhfs.objects.persistence;
 import com.usatiuk.objects.common.runtime.JObjectKey;
 
 import java.io.Serializable;
-import java.util.List;
+import java.util.Collection;
 
 // FIXME: Serializable
-public interface TxManifest extends Serializable {
-    List<JObjectKey> getWritten();
-
-    List<JObjectKey> getDeleted();
+public record TxManifest(Collection<JObjectKey> written, Collection<JObjectKey> deleted) implements Serializable {
 }
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java
index 59b67811..6a2c4665 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java
@@ -6,7 +6,6 @@ import com.usatiuk.objects.common.runtime.JObjectKey;
 import lombok.Builder;
 
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.LinkedHashSet;
 
 @Builder(toBuilder = true)
@@ -25,4 +24,9 @@ public record ChunkData(JObjectKey key, Collection<JObjectKey> refsFrom, boolean
     public ChunkData withFrozen(boolean frozen) {
         return this.toBuilder().frozen(frozen).build();
     }
+
+    @Override
+    public int estimateSize() {
+        return data.size();
+    }
 }
\ No newline at end of file
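estimateSize() is the input to the writeback accounting above: each CommittedEntry records it, calculateTotalSize() sums it per bundle, and createBundle() compares the running total against the writeback limit, so an estimate only needs to be cheap and roughly proportional to the serialized size. For ChunkData the payload size is the natural choice; a hypothetical reference-heavy object could get by with a rough formula (refs() is made up for illustration):

    @Override
    public int estimateSize() {
        // fixed overhead plus a guessed per-reference cost, in the same
        // spirit as the flat 64-byte estimate used for DeletedEntry
        return 128 + refs().size() * 64;
    }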
From 2aa07b205f7fd583ca744674c133e56628ebd946 Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Thu, 2 Jan 2025 11:06:06 +0100
Subject: [PATCH 035/105] commit retry

---
 .../usatiuk/dhfs/objects/JObjectManager.java  | 51 ++++++++++---------
 .../dhfs/objects/TransactionManager.java      |  5 ++
 .../dhfs/objects/TxCommitException.java       | 11 ++++
 3 files changed, 43 insertions(+), 24 deletions(-)
 create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java

diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index 7bffb427..0e66c621 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -185,12 +185,12 @@ public class JObjectManager {
                 case null -> {
                     var dep = dependenciesLocked.get(key);
                     if (dep == null) {
-                        throw new IllegalStateException("No dependency for " + key);
+                        throw new TxCommitException("No dependency for " + key);
                     }
                     yield dep.data.map(JDataVersionedWrapper::data).orElse(null);
                 }
                 default -> {
-                    throw new IllegalStateException("Unexpected value: " + current.get(key));
+                    throw new TxCommitException("Unexpected value: " + current.get(key));
                 }
             };
@@ -227,7 +227,7 @@ public class JObjectManager {
                     case TxRecord.TxObjectRecordDeleted deleted -> {
                         hook.onDelete(deleted.key(), oldObj);
                     }
-                    default -> throw new IllegalStateException("Unexpected value: " + entry);
+                    default -> throw new TxCommitException("Unexpected value: " + entry);
                 }
                 current.put(entry.key(), entry);
             }
@@ -250,13 +250,13 @@ public class JObjectManager {
                 if (dep.getValue().data().get().version() >= tx.getId()) {
                     Log.trace("Checking dependency " + dep.getKey() + " - newer than");
-                    throw new IllegalStateException("Serialization hazard: " + dep.getValue().data().get().version() + " vs " + tx.getId());
+                    throw new TxCommitException("Serialization hazard: " + dep.getValue().data().get().version() + " vs " + tx.getId());
                 }
 
                 var read = reads.get(dep.getKey());
                 if (read != null && read.data().orElse(null) != dep.getValue().data().orElse(null)) {
                     Log.trace("Checking dependency " + dep.getKey() + " - read mismatch");
-                    throw new IllegalStateException("Read mismatch for " + dep.getKey() + ": " + read + " vs " + dep.getValue());
+                    throw new TxCommitException("Read mismatch for " + dep.getKey() + ": " + read + " vs " + dep.getValue());
                 }
 
                 Log.trace("Checking dependency " + dep.getKey() + " - ok");
@@ -265,32 +265,35 @@ public class JObjectManager {
             Log.tracef("Flushing transaction %d to storage", tx.getId());
 
             var bundle = txWriteback.createBundle();
-
-            for (var action : current.entrySet()) {
-                switch (action.getValue()) {
-                    case TxRecord.TxObjectRecordWrite<?> write -> {
-                        Log.trace("Flushing object " + action.getKey());
-                        var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId());
-                        bundle.commit(wrapped);
-                        _objects.put(action.getKey(), new JDataWrapper<>(wrapped));
-                    }
-                    case TxRecord.TxObjectRecordDeleted deleted -> {
-                        Log.trace("Deleting object " + action.getKey());
-                        bundle.delete(action.getKey());
-                        _objects.remove(action.getKey());
-                    }
-                    default -> {
-                        throw new IllegalStateException("Unexpected value: " + action.getValue());
+            try {
+                for (var action : current.entrySet()) {
+                    switch (action.getValue()) {
+                        case TxRecord.TxObjectRecordWrite<?> write -> {
+                            Log.trace("Flushing object " + action.getKey());
+                            var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId());
+                            bundle.commit(wrapped);
+                            _objects.put(action.getKey(), new JDataWrapper<>(wrapped));
+                        }
+                        case TxRecord.TxObjectRecordDeleted deleted -> {
+                            Log.trace("Deleting object " + action.getKey());
+                            bundle.delete(action.getKey());
+                            _objects.remove(action.getKey());
+                        }
+                        default -> {
+                            throw new TxCommitException("Unexpected value: " + action.getValue());
+                        }
                     }
                 }
+            } catch (Throwable t) {
+                txWriteback.dropBundle(bundle);
+                throw new TxCommitException(t.getMessage(), t);
             }
 
             Log.tracef("Committing transaction %d to storage", tx.getId());
             txWriteback.commitBundle(bundle);
-        } catch (
-                Throwable t) {
+        } catch (Throwable t) {
             Log.error("Error when committing transaction", t);
-            throw t;
+            throw new TxCommitException(t.getMessage(), t);
         } finally {
             for (var unlock : toUnlock) {
                 unlock.close();
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
index f50260ab..4f30a3f4 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
@@ -22,6 +22,8 @@ public interface TransactionManager {
             var ret = supplier.get();
             commit();
             return ret;
+        } catch (TxCommitException txCommitException) {
+            return run(supplier);
         } catch (Throwable e) {
             rollback();
             throw e;
@@ -38,6 +40,9 @@ public interface TransactionManager {
         try {
             fn.apply();
             commit();
+        } catch (TxCommitException txCommitException) {
+            run(fn);
+            return;
         } catch (Throwable e) {
             rollback();
             throw e;
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java
new file mode 100644
index 00000000..73e488d6
--- /dev/null
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxCommitException.java
@@ -0,0 +1,11 @@
+package com.usatiuk.dhfs.objects;
+
+public class TxCommitException extends RuntimeException {
+    public TxCommitException(String message) {
+        super(message);
+    }
+
+    public TxCommitException(String message, Throwable cause) {
+        super(message, cause);
+    }
+}

From 2d060d8140a4217ced92f547088a180429a0e1e6 Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Thu, 2 Jan 2025 16:41:07 +0100
Subject: [PATCH 036/105] some moving around

---
 dhfs-parent/objects-common/deployment/pom.xml | 51 ----------
.../deployment/ObjectsCommonProcessor.java | 14 --- .../common/test/ObjectsCommonDevModeTest.java | 23 ----- .../common/test/ObjectsCommonTest.java | 23 ----- .../objects-common/integration-tests/pom.xml | 93 ------------------- .../src/main/resources/application.properties | 1 - dhfs-parent/objects-common/pom.xml | 23 ----- dhfs-parent/objects-common/runtime/pom.xml | 59 ------------ .../resources/META-INF/quarkus-extension.yaml | 9 -- dhfs-parent/objects/pom.xml | 5 - .../dhfs/objects/CurrentTransaction.java | 2 - .../java/com/usatiuk/dhfs/objects}/JData.java | 2 +- .../dhfs/objects/JDataVersionedWrapper.java | 1 - .../com/usatiuk/dhfs/objects}/JObjectKey.java | 2 +- .../usatiuk/dhfs/objects/JObjectManager.java | 4 +- .../usatiuk/dhfs/objects/PreCommitTxHook.java | 3 - .../com/usatiuk/dhfs/objects/TxBundle.java | 3 - .../usatiuk/dhfs/objects/TxWritebackImpl.java | 1 - .../FileObjectPersistentStore.java | 2 +- .../MemoryObjectPersistentStore.java | 2 +- .../persistence/ObjectPersistentStore.java | 2 +- .../dhfs/objects/persistence/TxManifest.java | 2 +- .../transaction/ReadTrackingObjectSource.java | 4 +- .../dhfs/objects/transaction/Transaction.java | 4 +- .../transaction/TransactionFactoryImpl.java | 4 +- .../transaction/TransactionObject.java | 2 +- .../transaction/TransactionObjectSource.java | 4 +- .../transaction/TransactionPrivate.java | 2 +- .../dhfs/objects/transaction/TxRecord.java | 4 +- .../src/main/resources/application.properties | 6 ++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 1 - .../dhfs/objects/PreCommitTxHookTest.java | 2 - .../com/usatiuk/dhfs/objects/data/Kid.java | 4 +- .../com/usatiuk/dhfs/objects/data/Parent.java | 4 +- dhfs-parent/pom.xml | 1 - .../usatiuk/dhfs/files/objects/ChunkData.java | 2 +- .../com/usatiuk/dhfs/files/objects/File.java | 2 +- .../dhfs/files/service/DhfsFileService.java | 2 +- .../files/service/DhfsFileServiceImpl.java | 4 +- .../usatiuk/dhfs/objects/DeleterTxHook.java | 2 - .../usatiuk/dhfs/objects/JDataRefcounted.java | 3 - .../dhfs/objects/RefcounterTxHook.java | 2 - .../jkleppmanntree/JKleppmannTreeManager.java | 2 +- .../JKleppmannTreeOpWrapper.java | 2 +- .../structs/JKleppmannTreeNode.java | 2 +- .../structs/JKleppmannTreeNodeMetaFile.java | 2 +- .../structs/JKleppmannTreePersistentData.java | 3 +- .../src/main/resources/application.properties | 9 -- .../benchmarks/DhfsFileBenchmarkTest.java | 2 +- 49 files changed, 40 insertions(+), 368 deletions(-) delete mode 100644 dhfs-parent/objects-common/deployment/pom.xml delete mode 100644 dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java delete mode 100644 dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java delete mode 100644 dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java delete mode 100644 dhfs-parent/objects-common/integration-tests/pom.xml delete mode 100644 dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties delete mode 100644 dhfs-parent/objects-common/pom.xml delete mode 100644 dhfs-parent/objects-common/runtime/pom.xml delete mode 100644 dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml rename dhfs-parent/{objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime => objects/src/main/java/com/usatiuk/dhfs/objects}/JData.java (92%) rename 
dhfs-parent/{objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime => objects/src/main/java/com/usatiuk/dhfs/objects}/JObjectKey.java (80%) diff --git a/dhfs-parent/objects-common/deployment/pom.xml b/dhfs-parent/objects-common/deployment/pom.xml deleted file mode 100644 index 36504616..00000000 --- a/dhfs-parent/objects-common/deployment/pom.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-common-parent - 1.0-SNAPSHOT - - objects-common-deployment - DHFS objects common stuff - Deployment - - - - io.quarkus - quarkus-arc-deployment - - - com.usatiuk - objects-common - ${project.version} - - - io.quarkus - quarkus-junit5-internal - test - - - - - - - maven-compiler-plugin - - - default-compile - - - - io.quarkus - quarkus-extension-processor - ${quarkus.platform.version} - - - - - - - - - diff --git a/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java b/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java deleted file mode 100644 index e4316e72..00000000 --- a/dhfs-parent/objects-common/deployment/src/main/java/com/usatiuk/objects/common/deployment/ObjectsCommonProcessor.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.usatiuk.objects.common.deployment; - -import io.quarkus.deployment.annotations.BuildStep; -import io.quarkus.deployment.builditem.FeatureBuildItem; - -class ObjectsCommonProcessor { - - private static final String FEATURE = "objects-common"; - - @BuildStep - FeatureBuildItem feature() { - return new FeatureBuildItem(FEATURE); - } -} diff --git a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java deleted file mode 100644 index 78fbe4f7..00000000 --- a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonDevModeTest.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.usatiuk.objects.common.test; - -import org.jboss.shrinkwrap.api.ShrinkWrap; -import org.jboss.shrinkwrap.api.spec.JavaArchive; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -import io.quarkus.test.QuarkusDevModeTest; - -public class ObjectsCommonDevModeTest { - - // Start hot reload (DevMode) test with your extension loaded - @RegisterExtension - static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest() - .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); - - @Test - public void writeYourOwnDevModeTest() { - // Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information - Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName()); - } -} diff --git a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java b/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java deleted file mode 100644 index c74a2c67..00000000 --- a/dhfs-parent/objects-common/deployment/src/test/java/com/usatiuk/objects/common/test/ObjectsCommonTest.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.usatiuk.objects.common.test; - -import org.jboss.shrinkwrap.api.ShrinkWrap; -import org.jboss.shrinkwrap.api.spec.JavaArchive; -import 
org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -import io.quarkus.test.QuarkusUnitTest; - -public class ObjectsCommonTest { - - // Start unit test with your extension loaded - @RegisterExtension - static final QuarkusUnitTest unitTest = new QuarkusUnitTest() - .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)); - - @Test - public void writeYourOwnUnitTest() { - // Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information - Assertions.assertTrue(true, "Add some assertions to " + getClass().getName()); - } -} diff --git a/dhfs-parent/objects-common/integration-tests/pom.xml b/dhfs-parent/objects-common/integration-tests/pom.xml deleted file mode 100644 index 80313213..00000000 --- a/dhfs-parent/objects-common/integration-tests/pom.xml +++ /dev/null @@ -1,93 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-common-parent - 1.0-SNAPSHOT - - objects-common-integration-tests - DHFS objects common stuff - Integration Tests - - - true - - - - - com.usatiuk - objects-common - ${project.version} - - - com.usatiuk - objects-common-deployment - ${project.version} - - - io.quarkus - quarkus-junit5 - test - - - - - - - io.quarkus - quarkus-maven-plugin - - - - build - - - - - - maven-failsafe-plugin - - - - integration-test - verify - - - - - - ${project.build.directory}/${project.build.finalName}-runner - org.jboss.logmanager.LogManager - ${maven.home} - - - - - - - - - native-image - - - native - - - - - - maven-surefire-plugin - - ${native.surefire.skip} - - - - - - false - true - - - - diff --git a/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties b/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties deleted file mode 100644 index b1645fe9..00000000 --- a/dhfs-parent/objects-common/integration-tests/src/main/resources/application.properties +++ /dev/null @@ -1 +0,0 @@ -quarkus.package.jar.decompiler.enabled=true \ No newline at end of file diff --git a/dhfs-parent/objects-common/pom.xml b/dhfs-parent/objects-common/pom.xml deleted file mode 100644 index 2d462191..00000000 --- a/dhfs-parent/objects-common/pom.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - 4.0.0 - - - com.usatiuk.dhfs - parent - 1.0-SNAPSHOT - - - com.usatiuk - objects-common-parent - 1.0-SNAPSHOT - pom - DHFS objects common stuff - Parent - - - deployment - runtime - integration-tests - - - diff --git a/dhfs-parent/objects-common/runtime/pom.xml b/dhfs-parent/objects-common/runtime/pom.xml deleted file mode 100644 index d02d31aa..00000000 --- a/dhfs-parent/objects-common/runtime/pom.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - 4.0.0 - - - com.usatiuk - objects-common-parent - 1.0-SNAPSHOT - - objects-common - DHFS objects common stuff - Runtime - - - - io.quarkus - quarkus-arc - - - - - - - io.quarkus - quarkus-extension-maven-plugin - ${quarkus.platform.version} - - - compile - - extension-descriptor - - - ${project.groupId}:${project.artifactId}-deployment:${project.version} - - - - - - - maven-compiler-plugin - - - default-compile - - - - io.quarkus - quarkus-extension-processor - ${quarkus.platform.version} - - - - - - - - - diff --git a/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml b/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml deleted file mode 100644 index b05649d2..00000000 --- 
a/dhfs-parent/objects-common/runtime/src/main/resources/META-INF/quarkus-extension.yaml +++ /dev/null @@ -1,9 +0,0 @@ -name: DHFS objects common stuff -#description: Do something useful. -metadata: -# keywords: -# - objects-common -# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension -# categories: -# - "miscellaneous" -# status: "preview" diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 07629904..b1b35c5d 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -64,11 +64,6 @@ supportlib 1.0-SNAPSHOT - - com.usatiuk - objects-common - 1.0-SNAPSHOT - io.quarkus quarkus-junit5-mockito diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index 552b45e8..52b97a5a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -2,8 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java similarity index 92% rename from dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java index b15418fe..501e3c35 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JData.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JData.java @@ -1,4 +1,4 @@ -package com.usatiuk.objects.common.runtime; +package com.usatiuk.dhfs.objects; import java.io.Serializable; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java index 5194b873..c20c3e46 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java @@ -1,6 +1,5 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.objects.common.runtime.JData; import jakarta.annotation.Nonnull; import lombok.Builder; diff --git a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java similarity index 80% rename from dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 1d66c9d6..9cf22d1c 100644 --- a/dhfs-parent/objects-common/runtime/src/main/java/com/usatiuk/objects/common/runtime/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -1,4 +1,4 @@ -package com.usatiuk.objects.common.runtime; +package com.usatiuk.dhfs.objects; import java.io.Serializable; diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 0e66c621..aaf9e57e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -4,8 +4,6 @@ import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.inject.Instance; @@ -19,7 +17,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.function.Function; -// Manages all access to com.usatiuk.objects.common.runtime.JData objects. +// Manages all access to com.usatiuk.dhfs.objects.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. // All data goes through it, it is responsible for transaction atomicity // TODO: persistent tx id diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java index afc190dc..3b1b50e4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PreCommitTxHook.java @@ -1,8 +1,5 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - public interface PreCommitTxHook { default void onChange(JObjectKey key, JData old, JData cur) { } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java index 8c35a36b..a31dc61d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -1,8 +1,5 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - public interface TxBundle { long getId(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index ab85f53e..d9d8d977 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -3,7 +3,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.utils.VoidFn; -import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index e4956ef9..1549c4e2 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -2,7 +2,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import com.usatiuk.dhfs.utils.ByteUtils; import com.usatiuk.dhfs.utils.SerializationHelper; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index d5c686b6..dd73ce6a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import io.quarkus.arc.properties.IfBuildProperty; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index afcab147..f1db0be4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import javax.annotation.Nonnull; import java.util.Collection; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java index 5a88d772..bd855980 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import java.io.Serializable; import java.util.Collection; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java index 540360dd..8035eac6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import java.util.Collections; import java.util.HashMap; diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index dc6325ec..198c8f30 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import java.util.Optional; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 9109d93d..ed9bce57 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import lombok.AccessLevel; import lombok.Getter; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java index 1c9fe912..fdf01178 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -import com.usatiuk.objects.common.runtime.JData; +import com.usatiuk.dhfs.objects.JData; import java.util.Optional; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java index acfae1ca..7fa8b516 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; public interface TransactionObjectSource { TransactionObject get(Class type, JObjectKey key); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index c2e147ed..4229e939 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects.transaction; -import 
com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import java.util.Collection; import java.util.Map; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java index 60a698cf..f5510e6f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TxRecord.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; public class TxRecord { public interface TxObjectRecord { diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties index a9c2019e..6b41a553 100644 --- a/dhfs-parent/objects/src/main/resources/application.properties +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -1,2 +1,8 @@ dhfs.objects.persistence=files +dhfs.objects.writeback.limit=134217728 +dhfs.objects.lru.limit=134217728 +dhfs.objects.lru.print-stats=false +dhfs.objects.lock_timeout_secs=15 +dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs +dhfs.objects.root=${HOME}/dhfs_default/data/stuff quarkus.package.jar.decompiler.enabled=true diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 2298d491..392cec00 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -3,7 +3,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 73de1ca4..4fe54c05 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -2,8 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.test.junit.QuarkusTest; import io.quarkus.test.junit.mockito.InjectSpy; import jakarta.enterprise.context.ApplicationScoped; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java index 99ab5fc3..0e597f4d 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; 
+import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; @Builder(toBuilder = true) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java index 8955e3d7..3527b6f4 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.data; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; @Builder(toBuilder = true) diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 447727d4..3140d94e 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -17,7 +17,6 @@ autoprotomap objects utils - objects-common diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 6a2c4665..e4810e19 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -2,7 +2,7 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JDataRefcounted; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; import java.util.Collection; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 15f9f292..f24227f8 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.files.objects; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; import java.util.Collection; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java index 04797d08..e5cb03e3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.files.service; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import org.apache.commons.lang3.tuple.Pair; import java.util.Optional; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index a169453e..d7ddefa7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -13,8 +13,8 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFil import 
com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index 16f50422..d91911be 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -1,8 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java index 5e120d93..a23f8a60 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java @@ -1,8 +1,5 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; - import java.util.Collection; import java.util.List; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index 2791e2d8..ba2e7b9f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -1,8 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; -import com.usatiuk.objects.common.runtime.JData; -import com.usatiuk.objects.common.runtime.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.collections4.CollectionUtils; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 35a74978..bbbbdb6a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -8,7 +8,7 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentD import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.*; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index 52749fb1..a0a6d7a4 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -2,7 +2,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.kleppmanntree.OpMove; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Getter; import java.util.UUID; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index e4509a0e..096a32e5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; import java.io.Serializable; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index 4e0e77ee..ae01c3ef 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -2,7 +2,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Getter; import java.util.Objects; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index 6f773a7d..30367594 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -1,11 +1,10 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; -import com.usatiuk.kleppmanntree.AtomicClock; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import lombok.Builder; import java.util.*; diff --git a/dhfs-parent/server/src/main/resources/application.properties b/dhfs-parent/server/src/main/resources/application.properties index 1fb7caa0..aacd8c29 100644 --- 
a/dhfs-parent/server/src/main/resources/application.properties +++ b/dhfs-parent/server/src/main/resources/application.properties @@ -1,6 +1,4 @@ quarkus.grpc.server.use-separate-server=false -dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs -dhfs.objects.root=${HOME}/dhfs_default/data/stuff dhfs.objects.peerdiscovery.port=42069 dhfs.objects.peerdiscovery.interval=5000 dhfs.objects.sync.timeout=30 @@ -22,13 +20,6 @@ dhfs.files.write_merge_limit=1.2 # Don't take blocks of this size and above when merging dhfs.files.write_merge_max_chunk_to_take=1 dhfs.files.write_last_chunk_limit=1.5 -dhfs.objects.writeback.delay=100 -dhfs.objects.writeback.limit=134217728 -dhfs.objects.lru.limit=134217728 -dhfs.objects.lru.print-stats=false -dhfs.objects.writeback.watermark-high=0.6 -dhfs.objects.writeback.watermark-low=0.4 -dhfs.objects.writeback.threads=4 dhfs.objects.deletion.delay=1000 dhfs.objects.deletion.can-delete-retry-delay=10000 dhfs.objects.ref_verification=true diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java index d7cade2c..504c1cc6 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.benchmarks; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.TempDataProfile; import com.usatiuk.dhfs.files.service.DhfsFileService; -import com.usatiuk.objects.common.runtime.JObjectKey; +import com.usatiuk.dhfs.objects.JObjectKey; import io.quarkus.test.junit.QuarkusTest; import io.quarkus.test.junit.TestProfile; import jakarta.inject.Inject; From 6540b51b5dc764c529a440781cc218ba592e6fa5 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 17:48:38 +0100 Subject: [PATCH 037/105] some lombok cleanup --- dhfs-parent/autoprotomap/deployment/pom.xml | 5 - dhfs-parent/kleppmanntree/pom.xml | 5 - dhfs-parent/kleppmanntree/src/lombok.config | 1 - .../usatiuk/kleppmanntree/TestNodeMeta.java | 12 +- .../kleppmanntree/TestNodeMetaFile.java | 7 +- .../usatiuk/kleppmanntree/TestTreeNode.java | 12 +- dhfs-parent/objects/pom.xml | 5 - dhfs-parent/objects/src/lombok.config | 1 - .../dhfs/objects/JDataVersionedWrapper.java | 5 +- .../dhfs/objects/TransactionManager.java | 20 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 2 - .../transaction/TransactionFactoryImpl.java | 10 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 196 ++++++++---------- .../dhfs/objects/PreCommitTxHookTest.java | 63 ++---- .../com/usatiuk/dhfs/objects/data/Kid.java | 5 +- .../com/usatiuk/dhfs/objects/data/Parent.java | 5 +- dhfs-parent/utils/pom.xml | 5 - .../utils/HashSetDelayedBlockingQueue.java | 1 - .../com/usatiuk/dhfs/utils/VoidFnThrows.java | 7 + 19 files changed, 165 insertions(+), 202 deletions(-) delete mode 100644 dhfs-parent/kleppmanntree/src/lombok.config delete mode 100644 dhfs-parent/objects/src/lombok.config create mode 100644 dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java diff --git a/dhfs-parent/autoprotomap/deployment/pom.xml b/dhfs-parent/autoprotomap/deployment/pom.xml index 29c02d7a..13f90a9d 100644 --- a/dhfs-parent/autoprotomap/deployment/pom.xml +++ b/dhfs-parent/autoprotomap/deployment/pom.xml @@ -34,11 +34,6 @@ org.apache.commons commons-collections4 - - org.projectlombok - lombok - provided - diff --git 
a/dhfs-parent/kleppmanntree/pom.xml b/dhfs-parent/kleppmanntree/pom.xml index be348e62..c6b78ae1 100644 --- a/dhfs-parent/kleppmanntree/pom.xml +++ b/dhfs-parent/kleppmanntree/pom.xml @@ -13,11 +13,6 @@ kleppmanntree - - org.projectlombok - lombok - provided - org.junit.jupiter junit-jupiter-engine diff --git a/dhfs-parent/kleppmanntree/src/lombok.config b/dhfs-parent/kleppmanntree/src/lombok.config deleted file mode 100644 index f1c474ce..00000000 --- a/dhfs-parent/kleppmanntree/src/lombok.config +++ /dev/null @@ -1 +0,0 @@ -lombok.accessors.prefix += _ diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java index 2c2e9f79..c02dd785 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java @@ -1,12 +1,16 @@ package com.usatiuk.kleppmanntree; -import lombok.Getter; - public abstract class TestNodeMeta implements NodeMeta { - @Getter private final String _name; - public TestNodeMeta(String name) {_name = name;} + @Override + public String getName() { + return _name; + } + + public TestNodeMeta(String name) { + _name = name; + } abstract public NodeMeta withName(String name); } diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java index 8a5bc91d..bb1bbec6 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java @@ -1,11 +1,12 @@ package com.usatiuk.kleppmanntree; -import lombok.Getter; - public class TestNodeMetaFile extends TestNodeMeta { - @Getter private final long _inode; + public long getInode() { + return _inode; + } + public TestNodeMetaFile(String name, long inode) { super(name); _inode = inode; diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java index 7db8967b..a8da2c14 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java @@ -1,12 +1,8 @@ package com.usatiuk.kleppmanntree; -import lombok.Builder; - -import java.util.Collection; import java.util.Collections; import java.util.Map; -@Builder(toBuilder = true) public record TestTreeNode(Long key, Long parent, OpMove lastEffectiveOp, TestNodeMeta meta, Map children) implements TreeNode { @@ -17,21 +13,21 @@ public record TestTreeNode(Long key, Long parent, OpMove withParent(Long parent) { - return this.toBuilder().parent(parent).build(); + return new TestTreeNode(key, parent, lastEffectiveOp, meta, children); } @Override public TreeNode withLastEffectiveOp(OpMove lastEffectiveOp) { - return this.toBuilder().lastEffectiveOp(lastEffectiveOp).build(); + return new TestTreeNode(key, parent, lastEffectiveOp, meta, children); } @Override public TreeNode withMeta(TestNodeMeta meta) { - return this.toBuilder().meta(meta).build(); + return new TestTreeNode(key, parent, lastEffectiveOp, meta, children); } @Override public TreeNode withChildren(Map children) { - return this.toBuilder().children(children).build(); + return new TestTreeNode(key, parent, lastEffectiveOp, 
meta, children);
     }
 }
diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml
index b1b35c5d..20b09cae 100644
--- a/dhfs-parent/objects/pom.xml
+++ b/dhfs-parent/objects/pom.xml
@@ -35,11 +35,6 @@
             net.openhft
             zero-allocation-hashing
-
-            org.projectlombok
-            lombok
-            provided
-
             org.junit.jupiter
             junit-jupiter-engine
diff --git a/dhfs-parent/objects/src/lombok.config b/dhfs-parent/objects/src/lombok.config
deleted file mode 100644
index f1c474ce..00000000
--- a/dhfs-parent/objects/src/lombok.config
+++ /dev/null
@@ -1 +0,0 @@
-lombok.accessors.prefix += _
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java
index c20c3e46..b71ac8b9 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java
@@ -1,10 +1,11 @@
 package com.usatiuk.dhfs.objects;
 
 import jakarta.annotation.Nonnull;
-import lombok.Builder;
 
 import java.io.Serializable;
 
-@Builder
 public record JDataVersionedWrapper<T extends JData>(@Nonnull T data, long version) implements Serializable {
+    public JDataVersionedWrapper<T> withVersion(long version) {
+        return new JDataVersionedWrapper<>(data, version);
+    }
 }
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
index 4f30a3f4..b64f46d6 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java
@@ -12,7 +12,7 @@ public interface TransactionManager {
 
     void rollback();
 
-    default <T> T run(Supplier<T> supplier) {
+    default <T> T runTries(Supplier<T> supplier, int tries) {
         if (current() != null) {
             return supplier.get();
         }
@@ -23,14 +23,16 @@ public interface TransactionManager {
             commit();
             return ret;
         } catch (TxCommitException txCommitException) {
-            return run(supplier);
+            if (tries == 0)
+                throw txCommitException;
+            return runTries(supplier, tries - 1);
         } catch (Throwable e) {
             rollback();
             throw e;
         }
     }
 
-    default void run(VoidFn fn) {
+    default void runTries(VoidFn fn, int tries) {
         if (current() != null) {
             fn.apply();
             return;
@@ -41,7 +43,9 @@ public interface TransactionManager {
             fn.apply();
             commit();
         } catch (TxCommitException txCommitException) {
-            run(fn);
+            if (tries == 0)
+                throw txCommitException;
+            runTries(fn, tries - 1);
             return;
         } catch (Throwable e) {
             rollback();
@@ -49,6 +53,14 @@ public interface TransactionManager {
         }
     }
 
+    default void run(VoidFn fn) {
+        runTries(fn, 10);
+    }
+
+    default <T> T run(Supplier<T> supplier) {
+        return runTries(supplier, 10);
+    }
+
     default void executeTx(VoidFn fn) {
         run(fn);
     }
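The retry budget is now explicit: run(...) keeps the old fire-and-forget signature but retries a conflicted transaction at most ten times, while runTries(...) lets callers (notably the tests below) pick their own budget, including zero to surface a conflict immediately. A usage sketch (the Counter record is made up for illustration):

    // Retried up to 10 times on TxCommitException, then rethrown.
    long next = txm.run(() -> {
        var c = curTx.get(Counter.class, key).orElseThrow();
        curTx.put(c.withValue(c.value() + 1));
        return c.value() + 1;
    });

    // No retries: a serialization conflict propagates to the caller.
    txm.runTries(() -> curTx.put(new Counter(key, 0)), 0);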
@@ -286,7 +285,6 @@ public class TxWritebackImpl implements TxWriteback { private final LinkedHashMap _entries = new LinkedHashMap<>(); private final ArrayList _callbacks = new ArrayList<>(); private long _txId; - @Getter private volatile boolean _ready = false; private long _size = -1; private boolean _wasCommitted = false; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index ed9bce57..bc1217bd 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,11 +1,9 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; -import lombok.AccessLevel; -import lombok.Getter; import java.util.Collection; import java.util.HashMap; @@ -15,8 +13,12 @@ import java.util.Optional; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { private class TransactionImpl implements TransactionPrivate { - @Getter(AccessLevel.PUBLIC) private final long _id; + + public long getId() { + return _id; + } + private final ReadTrackingObjectSource _source; private final Map> _writes = new HashMap<>(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 392cec00..c0836fbf 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -25,123 +25,91 @@ public class ObjectsTest { @Test void createObject() { - { - txm.begin(); - var newParent = new Parent(JObjectKey.of("Parent"), "John"); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreate"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); - var parent = curTx.get(Parent.class, JObjectKey.of("Parent")).orElse(null); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); } @Test void createGetObject() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentCreateGet"), "John"); curTx.put(newParent); var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); - { - txm.begin(); - var parent = curTx.get(Parent.class, JObjectKey.of("ParentCreateGet")).orElse(null); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateGet")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); } @Test void createDeleteObject() { - { - txm.begin(); - var newParent = new Parent(JObjectKey.of("Parent2"), "John"); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreateDeleteObject"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); - var parent = curTx.get(Parent.class, JObjectKey.of("Parent2")).orElse(null); + txm.run(() -> { + var parent = curTx.get(Parent.class, 
JObjectKey.of("ParentCreateDeleteObject")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); - { - txm.begin(); - curTx.delete(new JObjectKey("Parent2")); - txm.commit(); - } + txm.run(() -> { + curTx.delete(new JObjectKey("ParentCreateDeleteObject")); + }); - { - txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreateDeleteObject")).orElse(null); Assertions.assertNull(parent); - txm.commit(); - } + }); } @Test void createCreateObject() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("Parent7"), "John"); curTx.put(newParent); - txm.commit(); - } - { - txm.begin(); + }); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("Parent7"), "John2"); curTx.put(newParent); - txm.commit(); - } - { - txm.begin(); + }); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("Parent7")).orElse(null); Assertions.assertEquals("John2", parent.name()); - txm.commit(); - } + }); } @Test void editObject() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("Parent3"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.OPTIMISTIC).orElse(null); Assertions.assertEquals("John", parent.name()); - curTx.put(parent.toBuilder().name("John2").build()); - txm.commit(); - } - - { - txm.begin(); + curTx.put(parent.withName("John2")); + }); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("Parent3"), LockingStrategy.WRITE).orElse(null); Assertions.assertEquals("John2", parent.name()); - curTx.put(parent.toBuilder().name("John3").build()); - txm.commit(); - } - - { - txm.begin(); + curTx.put(parent.withName("John3")); + }); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("Parent3")).orElse(null); Assertions.assertEquals("John3", parent.name()); - txm.commit(); - } + }); } @Test @@ -155,13 +123,17 @@ public class ObjectsTest { Just.run(() -> { try { Log.warn("Thread 1"); - txm.begin(); - barrier.await(); - var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); - var newParent = new Parent(JObjectKey.of("Parent2"), "John"); - curTx.put(newParent); - Log.warn("Thread 1 commit"); - txm.commit(); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + var newParent = new Parent(JObjectKey.of("Parent2"), "John"); + curTx.put(newParent); + Log.warn("Thread 1 commit"); + }, 0); thread1Failed.set(false); return null; } finally { @@ -171,13 +143,17 @@ public class ObjectsTest { Just.run(() -> { try { Log.warn("Thread 2"); - txm.begin(); - barrier.await(); - var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); - var newParent = new Parent(JObjectKey.of("Parent2"), "John2"); - curTx.put(newParent); - Log.warn("Thread 2 commit"); - txm.commit(); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + var newParent = new Parent(JObjectKey.of("Parent2"), "John2"); + curTx.put(newParent); + Log.warn("Thread 2 commit"); + }, 0); thread2Failed.set(false); return null; } finally { @@ -187,9 +163,9 @@ public class ObjectsTest { 
latch.await(); - txm.begin(); - var got = curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); - txm.commit(); + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); + }); if (!thread1Failed.get()) { Assertions.assertTrue(thread2Failed.get()); @@ -205,12 +181,10 @@ public class ObjectsTest { @EnumSource(LockingStrategy.class) void editConflict(LockingStrategy strategy) throws InterruptedException { String key = "Parent4" + strategy.name(); - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of(key), "John3"); curTx.put(newParent); - txm.commit(); - } + }); AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); @@ -221,12 +195,16 @@ public class ObjectsTest { Just.run(() -> { try { Log.warn("Thread 1"); - txm.begin(); - barrier.await(); - var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); - curTx.put(parent.toBuilder().name("John").build()); - Log.warn("Thread 1 commit"); - txm.commit(); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John")); + Log.warn("Thread 1 commit"); + }, 0); Log.warn("Thread 1 commit done"); thread1Failed.set(false); return null; @@ -237,12 +215,16 @@ public class ObjectsTest { Just.run(() -> { try { Log.warn("Thread 2"); - txm.begin(); - barrier.await(); - var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); - curTx.put(parent.toBuilder().name("John2").build()); - Log.warn("Thread 2 commit"); - txm.commit(); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John2")); + Log.warn("Thread 2 commit"); + }, 0); Log.warn("Thread 2 commit done"); thread2Failed.set(false); return null; @@ -253,9 +235,9 @@ public class ObjectsTest { latchEnd.await(); - txm.begin(); - var got = curTx.get(Parent.class, new JObjectKey(key)).orElse(null); - txm.commit(); + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); + }); if (!thread1Failed.get()) { Assertions.assertTrue(thread2Failed.get()); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 4fe54c05..850548d7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -28,49 +28,38 @@ public class PreCommitTxHookTest { @Test void createObject() { - { - txm.begin(); - var newParent = new Parent(JObjectKey.of("ParentCreate"), "John"); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of("ParentCreate2"), "John"); curTx.put(newParent); - curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); - var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate")).orElse(null); + txm.run(() -> { + var parent = curTx.get(Parent.class, new JObjectKey("ParentCreate2")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = 
ArgumentCaptor.forClass(JObjectKey.class); Mockito.verify(spyHook, Mockito.times(1)).onCreate(keyCaptor.capture(), dataCaptor.capture()); Assertions.assertEquals("John", ((Parent) dataCaptor.getValue()).name()); - Assertions.assertEquals(new JObjectKey("ParentCreate"), keyCaptor.getValue()); + Assertions.assertEquals(new JObjectKey("ParentCreate2"), keyCaptor.getValue()); } @Test void deleteObject() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentDel"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("ParentDel")).orElse(null); Assertions.assertEquals("John", parent.name()); - txm.commit(); - } + }); - { - txm.begin(); + txm.run(() -> { curTx.delete(new JObjectKey("ParentDel")); - txm.commit(); - } + }); ArgumentCaptor dataCaptor = ArgumentCaptor.forClass(JData.class); ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(JObjectKey.class); @@ -81,19 +70,15 @@ public class PreCommitTxHookTest { @Test void editObject() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentEdit"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentEdit"), "John changed"); curTx.put(newParent); - txm.commit(); - } + }); ArgumentCaptor dataCaptorOld = ArgumentCaptor.forClass(JData.class); ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); @@ -106,20 +91,16 @@ public class PreCommitTxHookTest { @Test void editObjectWithGet() { - { - txm.begin(); + txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentEdit2"), "John"); curTx.put(newParent); - txm.commit(); - } + }); - { - txm.begin(); + txm.run(() -> { var parent = curTx.get(Parent.class, new JObjectKey("ParentEdit2")).orElse(null); Assertions.assertEquals("John", parent.name()); - curTx.put(parent.toBuilder().name("John changed").build()); - txm.commit(); - } + curTx.put(parent.withName("John changed")); + }); ArgumentCaptor dataCaptorOld = ArgumentCaptor.forClass(JData.class); ArgumentCaptor dataCaptorNew = ArgumentCaptor.forClass(JData.class); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java index 0e597f4d..b49d163f 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Kid.java @@ -2,8 +2,9 @@ package com.usatiuk.dhfs.objects.data; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; -@Builder(toBuilder = true) public record Kid(JObjectKey key, String name) implements JData { + public Kid withName(String name) { + return new Kid(key, name); + } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java index 3527b6f4..c6dcbbb0 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/data/Parent.java @@ -2,8 +2,9 @@ package com.usatiuk.dhfs.objects.data; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; -@Builder(toBuilder = true) public record Parent(JObjectKey key, String name) implements JData { + public Parent withName(String 
name) { + return new Parent(key, name); + } } \ No newline at end of file diff --git a/dhfs-parent/utils/pom.xml b/dhfs-parent/utils/pom.xml index 30db029a..7b67e6f1 100644 --- a/dhfs-parent/utils/pom.xml +++ b/dhfs-parent/utils/pom.xml @@ -31,11 +31,6 @@ io.quarkus quarkus-grpc - - org.projectlombok - lombok - provided - org.junit.jupiter junit-jupiter-engine diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java index 628bf4fd..ce337297 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.utils; import jakarta.annotation.Nullable; -import lombok.Getter; import java.util.ArrayList; import java.util.Collection; diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java new file mode 100644 index 00000000..64dfe36f --- /dev/null +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/VoidFnThrows.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.utils; + +@FunctionalInterface +public interface VoidFnThrows { + void apply() throws Throwable; +} + From 5765efc9985f911fed317e5c732ec4c19827af89 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 18:07:52 +0100 Subject: [PATCH 038/105] more lombok cleanup --- dhfs-parent/server/pom.xml | 5 ---- dhfs-parent/server/src/lombok.config | 1 - .../usatiuk/dhfs/files/objects/ChunkData.java | 6 ++-- .../com/usatiuk/dhfs/files/objects/File.java | 30 ++++++++++++++++--- .../files/service/DhfsFileServiceImpl.java | 16 +++++----- .../dhfs/fuse/JnrPtrByteOutputAccessors.java | 11 +++++-- .../jkleppmanntree/JKleppmannTreeManager.java | 14 ++++----- .../JKleppmannTreeOpWrapper.java | 8 +++-- .../JKleppmannTreePeriodicPushOp.java | 14 +++++---- .../structs/JKleppmannTreeNode.java | 16 +++++----- .../structs/JKleppmannTreeNodeMeta.java | 10 +++++-- .../structs/JKleppmannTreeNodeMetaFile.java | 8 +++-- .../structs/JKleppmannTreePersistentData.java | 22 +++++++++++--- 13 files changed, 101 insertions(+), 60 deletions(-) delete mode 100644 dhfs-parent/server/src/lombok.config diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index 2c1003b6..74372a0d 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -85,11 +85,6 @@ quarkus-junit5 test - - org.projectlombok - lombok - provided - com.github.SerCeMan jnr-fuse diff --git a/dhfs-parent/server/src/lombok.config b/dhfs-parent/server/src/lombok.config deleted file mode 100644 index f1c474ce..00000000 --- a/dhfs-parent/server/src/lombok.config +++ /dev/null @@ -1 +0,0 @@ -lombok.accessors.prefix += _ diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index e4810e19..517e3ce3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -3,12 +3,10 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; import java.util.Collection; import java.util.LinkedHashSet; 
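The lombok cleanup in this series always follows the same recipe: @Builder(toBuilder = true) on a record is replaced by hand-written "withers" that copy every component and substitute the changed one. A minimal sketch of the pattern on a hypothetical Example record (not part of this tree):

    // What toBuilder().name(name).build() used to generate, made explicit.
    public record Example(JObjectKey key, String name) {
        public Example withName(String name) {
            return new Example(key, name);
        }
    }

The price is that each wither must spell out the full component list, as the nine-component File record further down shows, but it removes the annotation-processor dependency from the module entirely.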
-@Builder(toBuilder = true) public record ChunkData(JObjectKey key, Collection refsFrom, boolean frozen, ByteString data) implements JDataRefcounted { public ChunkData(JObjectKey key, ByteString data) { @@ -17,12 +15,12 @@ public record ChunkData(JObjectKey key, Collection refsFrom, boolean @Override public ChunkData withRefsFrom(Collection refs) { - return this.toBuilder().refsFrom(refs).build(); + return new ChunkData(key, refs, frozen, data); } @Override public ChunkData withFrozen(boolean frozen) { - return this.toBuilder().frozen(frozen).build(); + return new ChunkData(key, refsFrom, frozen, data); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index f24227f8..19afb5fe 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,23 +1,45 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; import java.util.Collection; import java.util.NavigableMap; -@Builder(toBuilder = true) public record File(JObjectKey key, Collection refsFrom, boolean frozen, long mode, long cTime, long mTime, NavigableMap chunks, boolean symlink, long size ) implements FsNode { @Override public File withRefsFrom(Collection refs) { - return this.toBuilder().refsFrom(refs).build(); + return new File(key, refs, frozen, mode, cTime, mTime, chunks, symlink, size); } @Override public File withFrozen(boolean frozen) { - return this.toBuilder().frozen(frozen).build(); + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withChunks(NavigableMap chunks) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withSymlink(boolean symlink) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withSize(long size) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withMode(long mode) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withCTime(long cTime) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + } + + public File withMTime(long mTime) { + return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index d7ddefa7..22e338d2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -4,6 +4,8 @@ import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; @@ -13,8 +15,6 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFil import 
com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.logging.Log; @@ -228,7 +228,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (dent instanceof JKleppmannTreeNode) { return true; } else if (dent instanceof File f) { - curTx.put(f.toBuilder().mode(mode).mTime(System.currentTimeMillis()).build()); + curTx.put(f.withMode(mode).withMTime(System.currentTimeMillis())); return true; } else { throw new IllegalArgumentException(uuid + " is not a file"); @@ -503,7 +503,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { realNewChunks.putAll(newChunks); - file = file.toBuilder().chunks(Collections.unmodifiableNavigableMap(realNewChunks)).mTime(System.currentTimeMillis()).build(); + file = file.withChunks(Collections.unmodifiableNavigableMap(realNewChunks)).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -527,7 +527,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (length == 0) { var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.chunks())); - file = file.toBuilder().chunks(new TreeMap<>()).mTime(System.currentTimeMillis()).build(); + file = file.withChunks(new TreeMap<>()).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, oldChunks.values()); updateFileSize(file); @@ -597,7 +597,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { realNewChunks.putAll(newChunks); - file = file.toBuilder().chunks(Collections.unmodifiableNavigableMap(realNewChunks)).mTime(System.currentTimeMillis()).build(); + file = file.withChunks(Collections.unmodifiableNavigableMap(realNewChunks)).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -652,7 +652,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { "File not found for setTimes: " + fileUuid)) ); - curTx.put(file.toBuilder().cTime(atimeMs).mTime(mtimeMs).build()); + curTx.put(file.withCTime(atimeMs).withMTime(mtimeMs)); return true; }); } @@ -669,7 +669,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (realSize != file.size()) { - curTx.put(file.toBuilder().size(realSize).build()); + curTx.put(file.withSize(realSize)); } }); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java index 78cc8ff4..98da17fa 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java @@ -3,18 +3,23 @@ package com.usatiuk.dhfs.fuse; import jakarta.inject.Singleton; import jdk.internal.access.JavaNioAccess; import jdk.internal.access.SharedSecrets; -import lombok.Getter; import sun.misc.Unsafe; import java.lang.reflect.Field; @Singleton class JnrPtrByteOutputAccessors { - @Getter JavaNioAccess _nioAccess; - @Getter Unsafe _unsafe; + public JavaNioAccess getNioAccess() { + return _nioAccess; + } + + public Unsafe getUnsafe() { + return _unsafe; + } + JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { _nioAccess = 
SharedSecrets.getJavaNioAccess(); Field f = Unsafe.class.getDeclaredField("theUnsafe"); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index bbbbdb6a..55802cd6 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; @@ -8,7 +9,6 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentD import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.*; -import com.usatiuk.dhfs.objects.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; @@ -279,7 +279,7 @@ public class JKleppmannTreeManager { @Override public Long getTimestamp() { var res = _data.clock() + 1; - _data = _data.toBuilder().clock(res).build(); + _data = _data.withClock(res); curTx.put(_data); return res; } @@ -292,7 +292,7 @@ public class JKleppmannTreeManager { @Override public Long updateTimestamp(Long receivedTimestamp) { var old = _data.clock(); - _data = _data.toBuilder().clock(Math.max(old, receivedTimestamp) + 1).build(); + _data = _data.withClock(Math.max(old, receivedTimestamp) + 1); curTx.put(_data); return old; } @@ -361,7 +361,7 @@ public class JKleppmannTreeManager { public void putForPeer(UUID peerId, Long timestamp) { var newPeerTimestampLog = new HashMap<>(_data.peerTimestampLog()); newPeerTimestampLog.put(peerId, timestamp); - _data = _data.toBuilder().peerTimestampLog(newPeerTimestampLog).build(); + _data = _data.withPeerTimestampLog(newPeerTimestampLog); curTx.put(_data); } } @@ -378,7 +378,7 @@ public class JKleppmannTreeManager { public Pair, LogRecord> takeOldest() { var newLog = new TreeMap<>(_data.log()); var ret = newLog.pollFirstEntry(); - _data = _data.toBuilder().log(newLog).build(); + _data = _data.withLog(newLog); curTx.put(_data); if (ret == null) return null; return Pair.of(ret); @@ -422,7 +422,7 @@ public class JKleppmannTreeManager { throw new IllegalStateException("Overwriting log entry?"); var newLog = new TreeMap<>(_data.log()); newLog.put(timestamp, record); - _data = _data.toBuilder().log(newLog).build(); + _data = _data.withLog(newLog); curTx.put(_data); } @@ -430,7 +430,7 @@ public class JKleppmannTreeManager { public void replace(CombinedTimestamp timestamp, LogRecord record) { var newLog = new TreeMap<>(_data.log()); newLog.put(timestamp, record); - _data = _data.toBuilder().log(newLog).build(); + _data = _data.withLog(newLog); curTx.put(_data); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index a0a6d7a4..cf5c8ce9 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -1,17 +1,19 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.kleppmanntree.OpMove; -import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Getter; import java.util.UUID; // Wrapper to avoid having to specify generic types public class JKleppmannTreeOpWrapper { - @Getter private final OpMove _op; + public OpMove getOp() { + return _op; + } + public JKleppmannTreeOpWrapper(OpMove op) { if (op == null) throw new IllegalArgumentException("op shouldn't be null"); _op = op; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java index 5259c51b..e2e4f8c2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -1,17 +1,19 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; -import lombok.Getter; - -import java.util.Collection; -import java.util.List; import java.util.UUID; public class JKleppmannTreePeriodicPushOp { - @Getter private final UUID _from; - @Getter private final long _timestamp; + public UUID getFrom() { + return _from; + } + + public long getTimestamp() { + return _timestamp; + } + public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { _from = from; _timestamp = timestamp; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 096a32e5..82ee0fde 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -1,10 +1,9 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; -import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; import java.io.Serializable; import java.util.Collection; @@ -14,7 +13,6 @@ import java.util.UUID; import java.util.stream.Stream; // FIXME: Ideally this is two classes? 
-@Builder(toBuilder = true) public record JKleppmannTreeNode(JObjectKey key, Collection refsFrom, boolean frozen, JObjectKey parent, OpMove lastEffectiveOp, JKleppmannTreeNodeMeta meta, @@ -26,32 +24,32 @@ public record JKleppmannTreeNode(JObjectKey key, Collection refsFrom @Override public JKleppmannTreeNode withParent(JObjectKey parent) { - return this.toBuilder().parent(parent).build(); + return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } @Override public JKleppmannTreeNode withLastEffectiveOp(OpMove lastEffectiveOp) { - return this.toBuilder().lastEffectiveOp(lastEffectiveOp).build(); + return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } @Override public JKleppmannTreeNode withMeta(JKleppmannTreeNodeMeta meta) { - return this.toBuilder().meta(meta).build(); + return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } @Override public JKleppmannTreeNode withChildren(Map children) { - return this.toBuilder().children(children).build(); + return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } @Override public JKleppmannTreeNode withRefsFrom(Collection refs) { - return this.toBuilder().refsFrom(refs).build(); + return new JKleppmannTreeNode(key, refs, frozen, parent, lastEffectiveOp, meta, children); } @Override public JKleppmannTreeNode withFrozen(boolean frozen) { - return this.toBuilder().frozen(frozen).build(); + return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java index 2ea7d27f..4a8f9fa6 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -3,16 +3,20 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP; import com.usatiuk.kleppmanntree.NodeMeta; -import lombok.Getter; import java.util.Objects; @ProtoMirror(JKleppmannTreeNodeMetaP.class) public abstract class JKleppmannTreeNodeMeta implements NodeMeta { - @Getter private final String _name; - public JKleppmannTreeNodeMeta(String name) {_name = name;} + public String getName() { + return _name; + } + + public JKleppmannTreeNodeMeta(String name) { + _name = name; + } public abstract JKleppmannTreeNodeMeta withName(String name); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index ae01c3ef..563a2447 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -1,17 +1,19 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; import com.usatiuk.dhfs.objects.JObjectKey; -import 
lombok.Getter; +import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; import java.util.Objects; @ProtoMirror(JKleppmannTreeNodeMetaFileP.class) public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { - @Getter private final JObjectKey _fileIno; + public JObjectKey getFileIno() { + return _fileIno; + } + public JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) { super(name); _fileIno = fileIno; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index 30367594..6422c69d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -5,11 +5,9 @@ import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.dhfs.objects.JObjectKey; -import lombok.Builder; import java.util.*; -@Builder(toBuilder = true) public record JKleppmannTreePersistentData( JObjectKey key, Collection refsFrom, boolean frozen, long clock, @@ -40,12 +38,28 @@ public record JKleppmannTreePersistentData( @Override public JKleppmannTreePersistentData withRefsFrom(Collection refs) { - return this.toBuilder().refsFrom(refs).build(); + return new JKleppmannTreePersistentData(key, refs, frozen, clock, queues, peerTimestampLog, log); } @Override public JKleppmannTreePersistentData withFrozen(boolean frozen) { - return this.toBuilder().frozen(frozen).build(); + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withClock(long clock) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withQueues(HashMap, OpMove>> queues) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withPeerTimestampLog(HashMap peerTimestampLog) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); + } + + public JKleppmannTreePersistentData withLog(TreeMap, LogRecord> log) { + return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } @Override From e870b297c571992b98c9b364faf6b5d3fc6b482c Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 18:10:30 +0100 Subject: [PATCH 039/105] add scala --- dhfs-parent/pom.xml | 51 ++++++++++++++++++++++++++++++++++++++ dhfs-parent/server/pom.xml | 8 ++++++ 2 files changed, 59 insertions(+) diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 3140d94e..b01f4338 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -32,6 +32,10 @@ 3.15.2 3.5.2 ${project.parent.build.outputDirectory}/native + + --add-exports java.base/sun.nio.ch=ALL-UNNAMED + --add-exports java.base/jdk.internal.access=ALL-UNNAMED + @@ -78,11 +82,58 @@ commons-collections4 4.5.0-M2 + + io.quarkiverse.scala + quarkus-scala3 + 1.0.0 + + + org.scala-lang + scala3-library_3 + 3.6.2 + + + net.alchim31.maven + scala-maven-plugin + 4.9.2 + + + scala-compile-first + process-resources + + add-source + compile + + + + scala-test-compile + 
process-test-resources + + add-source + testCompile + + + + + JavaThenScala + + + -deprecated + -explain + -feature + -Ysafe-init + + + --add-exports + java.base/jdk.internal.access=ALL-UNNAMED + + + + + ${quarkus.platform.group-id} quarkus-maven-plugin diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index 74372a0d..89fdfa20 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -151,6 +151,14 @@ utils 1.0-SNAPSHOT + + io.quarkiverse.scala + quarkus-scala3 + + + org.scala-lang + scala3-library_3 + From 6a4b2dd815eed6c0af87115dd7c85f10a1000e57 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 18:23:20 +0100 Subject: [PATCH 040/105] Revert "add scala" This reverts commit 375925b800c1e844d0c0dcbd541281b5f0340332. --- dhfs-parent/pom.xml | 51 -------------------------------------- dhfs-parent/server/pom.xml | 8 ------ 2 files changed, 59 deletions(-) diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index b01f4338..3140d94e 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -32,10 +32,6 @@ 3.15.2 3.5.2 ${project.parent.build.outputDirectory}/native - - --add-exports java.base/sun.nio.ch=ALL-UNNAMED - --add-exports java.base/jdk.internal.access=ALL-UNNAMED - @@ -82,58 +78,11 @@ commons-collections4 4.5.0-M2 - - io.quarkiverse.scala - quarkus-scala3 - 1.0.0 - - - org.scala-lang - scala3-library_3 - 3.6.2 - - - net.alchim31.maven - scala-maven-plugin - 4.9.2 - - - scala-compile-first - process-resources - - add-source - compile - - - - scala-test-compile - process-test-resources - - add-source - testCompile - - - - - JavaThenScala - - - -deprecated - -explain - -feature - -Ysafe-init - - - --add-exports - java.base/jdk.internal.access=ALL-UNNAMED - - - ${quarkus.platform.group-id} quarkus-maven-plugin diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index 89fdfa20..74372a0d 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -151,14 +151,6 @@ utils 1.0-SNAPSHOT - - io.quarkiverse.scala - quarkus-scala3 - - - org.scala-lang - scala3-library_3 - From 57f865dafb44f5df18cd7443f64a60b0347d4912 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 18:51:26 +0100 Subject: [PATCH 041/105] hacky read anomaly solution --- .../usatiuk/dhfs/objects/JObjectManager.java | 22 +++++++++++-- .../com/usatiuk/dhfs/objects/TxWriteback.java | 14 +++++++++ .../usatiuk/dhfs/objects/TxWritebackImpl.java | 31 ++++++++++++++++--- 3 files changed, 61 insertions(+), 6 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index aaf9e57e..3f7b2239 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -82,7 +82,25 @@ public class JObjectManager { try (var readLock = _objLocker.lock(key)) { if (_objects.containsKey(key)) continue; - var read = objectStorage.readObject(key) + var pending = txWriteback.getPendingWrite(key); + + JDataVersionedWrapper read = null; // stays null only when nothing is pending + + switch (pending.orElse(null)) { + case TxWriteback.PendingWrite write -> { + read = write.data(); + } + case TxWriteback.PendingDelete delete -> { + return null; + } + case null -> { + } + default -> { + throw new IllegalStateException("Unexpected value: " + pending); + } + } + + if (read == null) read = objectStorage.readObject(key) .map(objectSerializer::deserialize) .orElse(null); if (read == null) return null; @@ -92,7 +110,7 @@ public
class JObjectManager { var wrapper = new JDataWrapper<>((JDataVersionedWrapper) read); var old = _objects.put(key, wrapper); assert old == null; - return read; + return (JDataVersionedWrapper) read; } else { throw new IllegalArgumentException("Object type mismatch: " + read.getClass() + " vs " + type); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 38ca45f4..92305bf1 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -2,6 +2,8 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.utils.VoidFn; +import java.util.Optional; + public interface TxWriteback { TxBundle createBundle(); @@ -11,6 +13,18 @@ public interface TxWriteback { void fence(long bundleId); + interface PendingWriteEntry { + long bundleId(); + } + + record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { + } + + record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { + } + + Optional getPendingWrite(JObjectKey key); + // Executes callback after bundle with bundleId id has been persisted // if it was already, runs callback on the caller thread void asyncFence(long bundleId, VoidFn callback); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 04900473..bf1584f9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -14,15 +14,14 @@ import org.apache.commons.lang3.concurrent.BasicThreadFactory; import org.eclipse.microprofile.config.inject.ConfigProperty; import java.util.*; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicLong; @ApplicationScoped public class TxWritebackImpl implements TxWriteback { private final LinkedList _pendingBundles = new LinkedList<>(); + + private final ConcurrentHashMap _pendingWrites = new ConcurrentHashMap<>(); private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); private final Object _flushWaitSynchronizer = new Object(); @@ -148,6 +147,14 @@ public class TxWritebackImpl implements TxWriteback { Log.trace("Bundle " + bundle.getId() + " committed"); + synchronized (_pendingBundles) { + bundle._entries.values().forEach(e -> { + var cur = _pendingWrites.get(e.key()); + if (cur.bundleId() == bundle.getId()) + _pendingWrites.remove(e.key(), cur); + }); + } + List> callbacks = new ArrayList<>(); synchronized (_notFlushedBundles) { _lastWrittenTx.set(bundle.getId()); @@ -233,6 +240,15 @@ public class TxWritebackImpl implements TxWriteback { verifyReady(); synchronized (_pendingBundles) { ((TxBundleImpl) bundle).setReady(); + ((TxBundleImpl) bundle)._entries.values().forEach(e -> { + switch (e) { + case TxBundleImpl.CommittedEntry c -> + _pendingWrites.put(c.key(), new PendingWrite(c.data, bundle.getId())); + case TxBundleImpl.DeletedEntry d -> + _pendingWrites.put(d.key(), new PendingDelete(d.key, bundle.getId())); + default -> throw new IllegalStateException("Unexpected value: " + e); + } + }); if 
(_pendingBundles.peek() == bundle) _pendingBundles.notify(); synchronized (_flushWaitSynchronizer) { @@ -264,6 +280,13 @@ public class TxWritebackImpl implements TxWriteback { } } + @Override + public Optional getPendingWrite(JObjectKey key) { + synchronized (_pendingWrites) { + return Optional.ofNullable(_pendingWrites.get(key)); + } + } + @Override public void asyncFence(long bundleId, VoidFn fn) { verifyReady(); From a143648c465cdbf3ea2865ec0e9aad10ca58bdc3 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 22:07:23 +0100 Subject: [PATCH 042/105] some cleanup and fixes --- .../usatiuk/dhfs/objects/JObjectManager.java | 67 +++-------- .../dhfs/objects/TransactionManager.java | 10 +- .../dhfs/objects/TransactionManagerImpl.java | 2 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 8 +- .../WritebackObjectPersistentStore.java | 62 ++++++++++ .../CachingObjectPersistentStore.java | 110 ++++++++++++++++++ .../SerializingObjectPersistentStore.java | 38 ++++++ .../src/main/resources/application.properties | 2 +- .../java/com/usatiuk/dhfs/fuse/DhfsFuse.java | 49 ++++---- .../usatiuk/dhfs/fuse/JnrPtrByteOutput.java | 2 +- .../supportlib/UninitializedByteBuffer.java | 32 ++--- 11 files changed, 283 insertions(+), 99 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 3f7b2239..2cc74735 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -1,6 +1,5 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; @@ -24,13 +23,9 @@ import java.util.function.Function; @ApplicationScoped public class JObjectManager { @Inject - ObjectPersistentStore objectStorage; - @Inject - ObjectSerializer objectSerializer; + WritebackObjectPersistentStore writebackObjectPersistentStore; @Inject TransactionFactory transactionFactory; - @Inject - TxWriteback txWriteback; private final List _preCommitTxHooks; @@ -82,27 +77,7 @@ public class JObjectManager { try (var readLock = _objLocker.lock(key)) { if (_objects.containsKey(key)) continue; - var pending = txWriteback.getPendingWrite(key); - - JDataVersionedWrapper read; - - switch (pending.orElse(null)) { - case TxWriteback.PendingWrite write -> { - read = write.data(); - } - case TxWriteback.PendingDelete delete -> { - return null; - } - case null -> { - } - default -> { - throw new IllegalStateException("Unexpected value: " + pending); - } - } - - read = objectStorage.readObject(key) - .map(objectSerializer::deserialize) - .orElse(null); + var read = writebackObjectPersistentStore.readObject(key).orElse(null); if (read == null) return null; @@ -280,35 +255,27 @@ public class JObjectManager { Log.tracef("Flushing transaction %d to storage", tx.getId()); - var bundle = txWriteback.createBundle(); - try { - for (var action : current.entrySet()) { - 
switch (action.getValue()) { - case TxRecord.TxObjectRecordWrite write -> { - Log.trace("Flushing object " + action.getKey()); - var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); - bundle.commit(wrapped); - _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); - } - case TxRecord.TxObjectRecordDeleted deleted -> { - Log.trace("Deleting object " + action.getKey()); - bundle.delete(action.getKey()); - _objects.remove(action.getKey()); - } - default -> { - throw new TxCommitException("Unexpected value: " + action.getValue()); - } + for (var action : current.entrySet()) { + switch (action.getValue()) { + case TxRecord.TxObjectRecordWrite write -> { + Log.trace("Flushing object " + action.getKey()); + var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); + _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + Log.trace("Deleting object " + action.getKey()); + _objects.remove(action.getKey()); + } + default -> { + throw new TxCommitException("Unexpected value: " + action.getValue()); } } - } catch (Throwable t) { - txWriteback.dropBundle(bundle); - throw new TxCommitException(t.getMessage(), t); } Log.tracef("Committing transaction %d to storage", tx.getId()); - txWriteback.commitBundle(bundle); + writebackObjectPersistentStore.commitTx(current.values(), tx.getId()); } catch (Throwable t) { - Log.error("Error when committing transaction", t); + Log.trace("Error when committing transaction", t); throw new TxCommitException(t.getMessage(), t); } finally { for (var unlock : toUnlock) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index b64f46d6..f4a5bc8d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.VoidFn; +import io.quarkus.logging.Log; import java.util.function.Supplier; @@ -23,8 +24,10 @@ public interface TransactionManager { commit(); return ret; } catch (TxCommitException txCommitException) { - if (tries == 0) + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); throw txCommitException; + } return runTries(supplier, tries - 1); } catch (Throwable e) { rollback(); @@ -43,10 +46,11 @@ public interface TransactionManager { fn.apply(); commit(); } catch (TxCommitException txCommitException) { - if (tries == 0) + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); throw txCommitException; + } runTries(fn, tries - 1); - return; } catch (Throwable e) { rollback(); throw e; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index 91ee6d50..0dacf744 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -34,7 +34,7 @@ public class TransactionManagerImpl implements TransactionManager { try { jObjectManager.commit(_currentTransaction.get()); } catch (Throwable e) { - Log.warn("Transaction commit failed", e); + Log.trace("Transaction commit failed", e); throw e; } finally 
{ _currentTransaction.remove(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index bf1584f9..e6543c23 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.dhfs.objects.persistence.ObjectPersistentStore; +import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifest; import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; @@ -29,9 +29,7 @@ public class TxWritebackImpl implements TxWriteback { private final AtomicLong _counter = new AtomicLong(); private final AtomicLong _waitedTotal = new AtomicLong(0); @Inject - ObjectPersistentStore objectPersistentStore; - @Inject - ObjectSerializer objectSerializer; + CachingObjectPersistentStore objectPersistentStore; @ConfigProperty(name = "dhfs.objects.writeback.limit") long sizeLimit; private long currentSize = 0; @@ -118,7 +116,7 @@ public class TxWritebackImpl implements TxWriteback { case TxBundleImpl.CommittedEntry c -> _commitExecutor.execute(() -> { try { Log.trace("Writing new " + c.key()); - objectPersistentStore.writeObject(c.key(), objectSerializer.serialize(c.data())); + objectPersistentStore.writeObject(c.key(), c.data()); } catch (Throwable t) { Log.error("Error writing " + c.key(), t); errors.add(t); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java new file mode 100644 index 00000000..c40164a9 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -0,0 +1,62 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; +import com.usatiuk.dhfs.objects.transaction.TxRecord; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Optional; + +@ApplicationScoped +public class WritebackObjectPersistentStore { + @Inject + CachingObjectPersistentStore delegate; + @Inject + TxWriteback txWriteback; + + @Nonnull + Collection findAllObjects() { + return delegate.findAllObjects(); + } + + @Nonnull + Optional> readObject(JObjectKey name) { + var pending = txWriteback.getPendingWrite(name).orElse(null); + return switch (pending) { + case TxWriteback.PendingWrite write -> Optional.of(write.data()); + case TxWriteback.PendingDelete ignored -> Optional.empty(); + case null -> delegate.readObject(name); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + } + + void commitTx(Collection> writes, long id) { + var bundle = txWriteback.createBundle(); + try { + for (var action : writes) { + switch (action) { + case TxRecord.TxObjectRecordWrite write -> { + Log.trace("Flushing object " + write.key()); + bundle.commit(new JDataVersionedWrapper<>(write.data(), id)); + } + case TxRecord.TxObjectRecordDeleted deleted -> { + Log.trace("Deleting object " + deleted.key()); + bundle.delete(deleted.key()); + } + default -> { + throw new TxCommitException("Unexpected value: " + action.key()); + } + } + } + } catch (Throwable t) 
{ + txWriteback.dropBundle(bundle); + throw new TxCommitException(t.getMessage(), t); + } + + Log.tracef("Committing transaction %d to storage", id); + txWriteback.commitBundle(bundle); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java new file mode 100644 index 00000000..37dd3e23 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -0,0 +1,110 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.utils.DataLocker; +import io.quarkus.logging.Log; +import io.quarkus.runtime.Startup; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Stream; + +@ApplicationScoped +public class CachingObjectPersistentStore { + @Inject + SerializingObjectPersistentStore delegate; + + private record CacheEntry(Optional> object, long size) { + } + + private final LinkedHashMap _cache = new LinkedHashMap<>(); + + @ConfigProperty(name = "dhfs.objects.lru.limit") + long sizeLimit; + @ConfigProperty(name = "dhfs.objects.lru.print-stats") + boolean printStats; + + private long _curSize = 0; + private long _evict = 0; + + private ExecutorService _statusExecutor = null; + + private final DataLocker _locker = new DataLocker(); + + @Startup + void init() { + if (printStats) { + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + Thread.sleep(10000); + if (_curSize > 0) + Log.info("Cache status: size=" + _curSize / 1024 / 1024 + "MB" + " evicted=" + _evict); + _evict = 0; + } + } catch (InterruptedException ignored) { + } + }); + } + } + + @Nonnull + public Collection findAllObjects() { + return delegate.findAllObjects(); + } + + private void put(JObjectKey key, Optional> obj) { + synchronized (_cache) { + int size = obj.map(o -> o.data().estimateSize()).orElse(0); + + _curSize += size; + var old = _cache.putLast(key, new CacheEntry(obj, size)); + if (old != null) + _curSize -= old.size(); + + while (_curSize >= sizeLimit) { + var del = _cache.pollFirstEntry(); + _curSize -= del.getValue().size(); + _evict++; + } + } + } + + @Nonnull + public Optional> readObject(JObjectKey name) { + try (var lock = _locker.lock(name)) { + synchronized (_cache) { + var got = _cache.get(name); + if (got != null) { + return got.object(); + } } + + var got = delegate.readObject(name); + put(name, got); + return got; + } + } + + public void writeObject(JObjectKey name, JDataVersionedWrapper object) { + delegate.writeObject(name, object); + } + + public void commitTx(TxManifest names) { + // During commit, readObject shouldn't be called for these items, + // it should be handled by the upstream store + for (var key : Stream.concat(names.written().stream(), names.deleted().stream()).toList()) { + synchronized (_cache) { _cache.remove(key); } // must hold the same lock as put() + } + delegate.commitTx(names); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java new file mode 100644 index 00000000..318c025a --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -0,0 +1,38 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.ObjectSerializer; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Optional; + +@ApplicationScoped +public class SerializingObjectPersistentStore { + @Inject + ObjectSerializer serializer; + + @Inject + ObjectPersistentStore delegate; + + @Nonnull + Collection findAllObjects() { + return delegate.findAllObjects(); + } + + @Nonnull + Optional> readObject(JObjectKey name) { + return delegate.readObject(name).map(serializer::deserialize); + } + + void writeObject(JObjectKey name, JDataVersionedWrapper object) { + delegate.writeObject(name, serializer.serialize(object)); + } + + void commitTx(TxManifest names) { + delegate.commitTx(names); + } +} diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties index 6b41a553..24bd7282 100644 --- a/dhfs-parent/objects/src/main/resources/application.properties +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -1,7 +1,7 @@ dhfs.objects.persistence=files dhfs.objects.writeback.limit=134217728 dhfs.objects.lru.limit=134217728 -dhfs.objects.lru.print-stats=false +dhfs.objects.lru.print-stats=true dhfs.objects.lock_timeout_secs=15 dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs dhfs.objects.root=${HOME}/dhfs_default/data/stuff diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java index 01644c3f..34111b34 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java @@ -106,7 +106,7 @@ public class DhfsFuse extends FuseStubFS { stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: stbuf.f_namemax.set(2048); return super.statfs(path, stbuf); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When statfs " + path, e); return -ErrorCodes.EIO(); } @@ -147,9 +147,6 @@ public class DhfsFuse extends FuseStubFS { stat.st_atim.tv_sec.set(found.get().mtime() / 1000); stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); stat.st_blksize.set(blksize); - } catch (Exception e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); } catch (Throwable e) { Log.error("When getattr " + path, e); return -ErrorCodes.EIO(); @@ -168,7 +165,7 @@ public class DhfsFuse extends FuseStubFS { timespec[1].tv_sec.get() * 1000); if (!res) return -ErrorCodes.EINVAL(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When utimens " + path, e); return -ErrorCodes.EIO(); } @@ -179,7 +176,7 @@ public class DhfsFuse extends FuseStubFS { try { if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT(); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When open " + path, e); return -ErrorCodes.EIO(); } @@ -197,7 +194,7 @@ public class DhfsFuse extends FuseStubFS { if (read.isEmpty()) return 0; 
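// (Zero-copy read path: the ByteString is streamed directly into the
// FUSE-supplied native buffer, which is what the next statement does.
// A minimal sketch of the pattern, using the types from this series:
//   ByteOutput out = new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size);
//   UnsafeByteOperations.unsafeWriteTo(read.get(), out); // no intermediate byte[]
// )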
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); return read.get().size(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When reading " + path, e); return -ErrorCodes.EIO(); } @@ -211,15 +208,19 @@ public class DhfsFuse extends FuseStubFS { if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); var buffer = UninitializedByteBuffer.allocateUninitialized((int) size); - jnrPtrByteOutputAccessors.getUnsafe().copyMemory( - buf.address(), - jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), - size - ); + if (buffer.isDirect()) { + jnrPtrByteOutputAccessors.getUnsafe().copyMemory( + buf.address(), + jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), + size + ); + } else { + buf.get(0, buffer.array(), 0, (int) size); + } var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); return written.intValue(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When writing " + path, e); return -ErrorCodes.EIO(); } @@ -231,7 +232,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.create(path, mode); if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating " + path, e); return -ErrorCodes.EIO(); } @@ -244,7 +245,7 @@ public class DhfsFuse extends FuseStubFS { return 0; } catch (AlreadyExistsException aex) { return -ErrorCodes.EEXIST(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating dir " + path, e); return -ErrorCodes.EIO(); } @@ -257,7 +258,7 @@ public class DhfsFuse extends FuseStubFS { return 0; } catch (DirectoryNotEmptyException ex) { return -ErrorCodes.ENOTEMPTY(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When removing dir " + path, e); return -ErrorCodes.EIO(); } @@ -269,7 +270,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.rename(path, newName); if (!ret) return -ErrorCodes.ENOENT(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When renaming " + path, e); return -ErrorCodes.EIO(); } @@ -281,7 +282,7 @@ public class DhfsFuse extends FuseStubFS { try { fileService.unlink(path); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When unlinking " + path, e); return -ErrorCodes.EIO(); } @@ -299,7 +300,7 @@ public class DhfsFuse extends FuseStubFS { return 0; else return -ErrorCodes.ENOSPC(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When truncating " + path, e); return -ErrorCodes.EIO(); } @@ -313,7 +314,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.chmod(fileOpt.get(), mode); if (ret) return 0; else return -ErrorCodes.EINVAL(); - } catch (Exception e) { + } catch (Throwable e) { Log.error("When chmod " + path, e); return -ErrorCodes.EIO(); } @@ -339,7 +340,7 @@ public class DhfsFuse extends FuseStubFS { } return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When readdir " + path, e); return -ErrorCodes.EIO(); } @@ -357,7 +358,7 @@ public class DhfsFuse extends FuseStubFS { UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); buf.putByte(Math.min(size - 1, read.size()), (byte) 0); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When reading " + path, e); return -ErrorCodes.EIO(); } @@ -369,7 +370,7 @@ public class DhfsFuse extends FuseStubFS { var fileOpt = fileService.open(path); if 
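// (Note on the write() hunk above: Unsafe.copyMemory is only valid for direct
// buffers that have a stable native address, so heap-backed buffers, e.g. the
// OutOfMemoryError fallback in UninitializedByteBuffer, take the bounds-checked
// jnr-ffi path instead: buf.get(0, buffer.array(), 0, (int) size).)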
(fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When chown " + path, e); return -ErrorCodes.EIO(); } @@ -381,7 +382,7 @@ public class DhfsFuse extends FuseStubFS { var ret = fileService.symlink(oldpath, newpath); if (ret == null) return -ErrorCodes.EEXIST(); else return 0; - } catch (Exception e) { + } catch (Throwable e) { Log.error("When creating " + newpath, e); return -ErrorCodes.EIO(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java index d2790516..51be0f7a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java @@ -51,7 +51,7 @@ public class JnrPtrByteOutput extends ByteOutput { var out = _backing.address() + _pos; _accessors.getUnsafe().copyMemory(addr, out, rem); } else { - throw new UnsupportedOperationException(); + _backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem); } _pos += rem; diff --git a/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java b/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java index c5f16629..42616fda 100644 --- a/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java +++ b/dhfs-parent/supportlib/src/main/java/com/usatiuk/dhfs/supportlib/UninitializedByteBuffer.java @@ -9,20 +9,24 @@ public class UninitializedByteBuffer { private static final Logger LOGGER = Logger.getLogger(UninitializedByteBuffer.class.getName()); public static ByteBuffer allocateUninitialized(int size) { - if (size < DhfsSupport.PAGE_SIZE) - return ByteBuffer.allocateDirect(size); + try { + if (size < DhfsSupport.PAGE_SIZE) + return ByteBuffer.allocateDirect(size); - var bb = new ByteBuffer[1]; - long token = DhfsSupport.allocateUninitializedByteBuffer(bb, size); - var ret = bb[0]; - CLEANER.register(ret, () -> { - try { - DhfsSupport.releaseByteBuffer(token); - } catch (Throwable e) { - LOGGER.severe("Error releasing buffer: " + e); - System.exit(-1); - } - }); - return ret; + var bb = new ByteBuffer[1]; + long token = DhfsSupport.allocateUninitializedByteBuffer(bb, size); + var ret = bb[0]; + CLEANER.register(ret, () -> { + try { + DhfsSupport.releaseByteBuffer(token); + } catch (Throwable e) { + LOGGER.severe("Error releasing buffer: " + e); + System.exit(-1); + } + }); + return ret; + } catch (OutOfMemoryError e) { + return ByteBuffer.allocate(size); + } } } From 2fe3cfc3f84435067d05a3ddc234dcf494e6203e Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 22:16:19 +0100 Subject: [PATCH 043/105] fix cache lock --- .../objects/persistence/CachingObjectPersistentStore.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 37dd3e23..d1f18961 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -102,8 +102,10 @@ public class CachingObjectPersistentStore { public void commitTx(TxManifest names) { // During 
commit, readObject shouldn't be called for these items, // it should be handled by the upstream store - for (var key : Stream.concat(names.written().stream(), names.deleted().stream()).toList()) { - _cache.remove(key); + synchronized (_cache) { + for (var key : Stream.concat(names.written().stream(), names.deleted().stream()).toList()) { + _cache.remove(key); + } } delegate.commitTx(names); } From f93b3226d083f23590876316e1ae89a556a61813 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 2 Jan 2025 22:34:56 +0100 Subject: [PATCH 044/105] writeback leak fix --- .../main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index e6543c23..42f4fb23 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -148,7 +148,7 @@ public class TxWritebackImpl implements TxWriteback { synchronized (_pendingBundles) { bundle._entries.values().forEach(e -> { var cur = _pendingWrites.get(e.key()); - if (cur.bundleId() == bundle.getId()) + if (cur.bundleId() <= bundle.getId()) _pendingWrites.remove(e.key(), cur); }); } @@ -280,7 +280,7 @@ public class TxWritebackImpl implements TxWriteback { @Override public Optional getPendingWrite(JObjectKey key) { - synchronized (_pendingWrites) { + synchronized (_pendingBundles) { return Optional.ofNullable(_pendingWrites.get(key)); } } From 0cab13624a19b6e51bcfab4302df0b151ac7c1d5 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 11:02:52 +0100 Subject: [PATCH 045/105] cache fixes --- .../dhfs/objects/persistence/CachingObjectPersistentStore.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index d1f18961..2aede5fc 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -25,7 +25,7 @@ public class CachingObjectPersistentStore { private record CacheEntry(Optional> object, long size) { } - private final LinkedHashMap _cache = new LinkedHashMap<>(); + private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); @ConfigProperty(name = "dhfs.objects.lru.limit") long sizeLimit; @@ -104,6 +104,7 @@ public class CachingObjectPersistentStore { // it should be handled by the upstream store synchronized (_cache) { for (var key : Stream.concat(names.written().stream(), names.deleted().stream()).toList()) { + _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); _cache.remove(key); } } From 4243618cb855510ffcfd18f7a75759fba64f3980 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 11:29:26 +0100 Subject: [PATCH 046/105] improved tx commit logs --- .../main/java/com/usatiuk/dhfs/objects/JObjectManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 
2cc74735..8e781272 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -162,8 +162,8 @@ public class JObjectManager { Consumer addDependency = key -> { dependenciesLocked.computeIfAbsent(key, k -> { - Log.trace("Adding dependency " + k.toString()); var got = getObjLock(JData.class, k); + Log.trace("Adding dependency " + k.toString() + " -> " + got); toUnlock.add(got.lock); return got; }); @@ -250,7 +250,7 @@ public class JObjectManager { throw new TxCommitException("Read mismatch for " + dep.getKey() + ": " + read + " vs " + dep.getValue()); } - Log.trace("Checking dependency " + dep.getKey() + " - ok"); + Log.trace("Checking dependency " + dep.getKey() + " - ok with read " + read); } Log.tracef("Flushing transaction %d to storage", tx.getId()); From 617a72814c8437b2020b0bd5897ce0d1d78032af Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 11:41:43 +0100 Subject: [PATCH 047/105] editConflict test fix --- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index c0836fbf..2483703c 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -215,12 +215,8 @@ public class ObjectsTest { Just.run(() -> { try { Log.warn("Thread 2"); + barrier.await(); // Ensure thread 2 tx id is larger than thread 1 txm.runTries(() -> { - try { - barrier.await(); - } catch (Throwable e) { - throw new RuntimeException(e); - } var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); curTx.put(parent.withName("John2")); Log.warn("Thread 2 commit"); @@ -239,16 +235,24 @@ public class ObjectsTest { return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); }); - if (!thread1Failed.get()) { - Assertions.assertTrue(thread2Failed.get()); - Assertions.assertEquals("John", got.name()); - } else if (!thread2Failed.get()) { - Assertions.assertEquals("John2", got.name()); - } else { - Assertions.fail("No thread succeeded"); + // It is possible that thread 2 did get the object after thread 1 committed it, so there is no conflict + Assertions.assertTrue(!thread1Failed.get() || !thread2Failed.get()); + + if (strategy.equals(LockingStrategy.WRITE)) { + if (!thread1Failed.get()) + Assertions.assertFalse(thread2Failed.get()); } - Assertions.assertTrue(thread1Failed.get() || thread2Failed.get()); + if (!thread1Failed.get()) { + if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.name()); + } else { + Assertions.assertEquals("John", got.name()); + } + } else { + Assertions.assertTrue(!thread2Failed.get()); + Assertions.assertEquals("John2", got.name()); + } } // } From ea0ee90776fc6cb7691aa7aded151540ab00e14b Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 12:22:43 +0100 Subject: [PATCH 048/105] start using pcollections --- .../com/usatiuk/dhfs/objects/JObjectKey.java | 7 ++- dhfs-parent/pom.xml | 5 +++ dhfs-parent/server/pom.xml | 4 ++ .../usatiuk/dhfs/files/objects/ChunkData.java | 11 +++-- .../com/usatiuk/dhfs/files/objects/File.java | 12 +++--- .../files/service/DhfsFileServiceImpl.java | 43 ++++++------------- .../usatiuk/dhfs/objects/JDataRefcounted.java | 6 ++- 
.../dhfs/objects/RefcounterTxHook.java | 27 ++++++------ .../jkleppmanntree/JKleppmannTreeManager.java | 3 +- .../structs/JKleppmannTreeNode.java | 8 ++-- .../structs/JKleppmannTreePersistentData.java | 7 +-- 11 files changed, 70 insertions(+), 63 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 9cf22d1c..2da79825 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -2,8 +2,13 @@ package com.usatiuk.dhfs.objects; import java.io.Serializable; -public record JObjectKey(String name) implements Serializable { +public record JObjectKey(String name) implements Serializable, Comparable { public static JObjectKey of(String name) { return new JObjectKey(name); } + + @Override + public int compareTo(JObjectKey o) { + return name.compareTo(o.name); + } } diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 3140d94e..6a4d4717 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -78,6 +78,11 @@ commons-collections4 4.5.0-M2 + + org.pcollections + pcollections + 4.0.2 + diff --git a/dhfs-parent/server/pom.xml b/dhfs-parent/server/pom.xml index 74372a0d..53daeb0e 100644 --- a/dhfs-parent/server/pom.xml +++ b/dhfs-parent/server/pom.xml @@ -126,6 +126,10 @@ org.apache.commons commons-collections4 + + org.pcollections + pcollections + org.apache.commons commons-math3 diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 517e3ce3..99811ff6 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -3,18 +3,17 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.dhfs.objects.JObjectKey; +import org.pcollections.PCollection; +import org.pcollections.TreePSet; -import java.util.Collection; -import java.util.LinkedHashSet; - -public record ChunkData(JObjectKey key, Collection refsFrom, boolean frozen, +public record ChunkData(JObjectKey key, PCollection refsFrom, boolean frozen, ByteString data) implements JDataRefcounted { public ChunkData(JObjectKey key, ByteString data) { - this(key, new LinkedHashSet<>(), false, data); + this(key, TreePSet.empty(), false, data); } @Override - public ChunkData withRefsFrom(Collection refs) { + public ChunkData withRefsFrom(PCollection refs) { return new ChunkData(key, refs, frozen, data); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 19afb5fe..fb63b29d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,15 +1,17 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.dhfs.objects.JObjectKey; +import org.pcollections.PCollection; +import org.pcollections.TreePMap; import java.util.Collection; -import java.util.NavigableMap; -public record File(JObjectKey key, Collection refsFrom, boolean frozen, long mode, long cTime, long mTime, - NavigableMap chunks, boolean symlink, long size +public record File(JObjectKey key, PCollection 
refsFrom, boolean frozen, + long mode, long cTime, long mTime, + TreePMap chunks, boolean symlink, long size ) implements FsNode { @Override - public File withRefsFrom(Collection refs) { + public File withRefsFrom(PCollection refs) { return new File(key, refs, frozen, mode, cTime, mTime, chunks, symlink, size); } @@ -18,7 +20,7 @@ public record File(JObjectKey key, Collection refsFrom, boolean froz return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); } - public File withChunks(NavigableMap chunks) { + public File withChunks(TreePMap chunks) { return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 22e338d2..baa9537b 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -25,6 +25,8 @@ import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.TreePMap; +import org.pcollections.TreePSet; import java.nio.charset.StandardCharsets; import java.nio.file.Path; @@ -150,7 +152,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(JObjectKey.of(fuuid.toString()), new HashSet<>(), false, mode, System.currentTimeMillis(), System.currentTimeMillis(), new TreeMap<>(), false, 0); + File f = new File(JObjectKey.of(fuuid.toString()), TreePSet.empty(), false, mode, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty(), false, 0); curTx.put(f); try { @@ -371,8 +373,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { file = curTx.get(File.class, fileUuid).orElse(null); } - // FIXME: Some kind of immutable interface? 
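// (This FIXME is what the switch to pcollections resolves: chunks() is now a
// TreePMap, immutable by construction, so the unmodifiable wrapper below can
// go away and updates compose persistently, as the rewritten line does:
//   file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks))
// )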
- var chunksAll = Collections.unmodifiableNavigableMap(file.chunks()); + var chunksAll = file.chunks(); var first = chunksAll.floorEntry(offset); var last = chunksAll.lowerEntry(offset + data.size()); NavigableMap removedChunks = new TreeMap<>(); @@ -494,16 +495,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } } - NavigableMap realNewChunks = new TreeMap<>(); - for (var chunk : chunksAll.entrySet()) { - if (!removedChunks.containsKey(chunk.getKey())) { - realNewChunks.put(chunk.getKey(), chunk.getValue()); - } - } - - realNewChunks.putAll(newChunks); - - file = file.withChunks(Collections.unmodifiableNavigableMap(realNewChunks)).withMTime(System.currentTimeMillis()); + file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -525,9 +517,9 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (length == 0) { - var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.chunks())); + var oldChunks = file.chunks(); - file = file.withChunks(new TreeMap<>()).withMTime(System.currentTimeMillis()); + file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, oldChunks.values()); updateFileSize(file); @@ -537,7 +529,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var curSize = size(fileUuid); if (curSize == length) return true; - var chunksAll = Collections.unmodifiableNavigableMap(file.chunks()); + var chunksAll = file.chunks(); NavigableMap removedChunks = new TreeMap<>(); NavigableMap newChunks = new TreeMap<>(); @@ -588,16 +580,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { newChunks.put(tail.getKey(), newChunkData.key()); } - NavigableMap realNewChunks = new TreeMap<>(); - for (var chunk : chunksAll.entrySet()) { - if (!removedChunks.containsKey(chunk.getKey())) { - realNewChunks.put(chunk.getKey(), chunk.getValue()); - } - } - - realNewChunks.putAll(newChunks); - - file = file.withChunks(Collections.unmodifiableNavigableMap(realNewChunks)).withMTime(System.currentTimeMillis()); + file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); curTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -633,10 +616,10 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(JObjectKey.of(fuuid.toString()), new HashSet<>(), false, 0, System.currentTimeMillis(), System.currentTimeMillis(), new TreeMap<>(), true, 0); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); + File f = new File(JObjectKey.of(fuuid.toString()), TreePSet.empty(), + false, 0, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty().plus(0L, newChunkData.key()), true, 0); - f.chunks().put(0L, newChunkData.key()); updateFileSize(f); getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); @@ -662,8 +645,8 @@ public class DhfsFileServiceImpl implements DhfsFileService { jObjectTxManager.executeTx(() -> { long realSize = 0; - var last = file.chunks().lastEntry(); - if (last != null) { + if (!file.chunks().isEmpty()) { + var last = file.chunks().lastEntry(); var lastSize = getChunkSize(last.getValue()); realSize = 
last.getKey() + lastSize; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java index a23f8a60..09293ebb 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRefcounted.java @@ -1,12 +1,14 @@ package com.usatiuk.dhfs.objects; +import org.pcollections.PCollection; + import java.util.Collection; import java.util.List; public interface JDataRefcounted extends JData { - Collection refsFrom(); + PCollection refsFrom(); - JDataRefcounted withRefsFrom(Collection refs); + JDataRefcounted withRefsFrom(PCollection refs); boolean frozen(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index ba2e7b9f..e4f945c7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -3,9 +3,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.collections4.CollectionUtils; - -import java.util.Set; @ApplicationScoped public class RefcounterTxHook implements PreCommitTxHook { @@ -19,14 +16,21 @@ public class RefcounterTxHook implements PreCommitTxHook { } var refOld = (JDataRefcounted) old; - for (var newRef : CollectionUtils.subtract(refCur.collectRefsTo(), refOld.collectRefsTo())) { - var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null); - curTx.put(referenced.withRefsFrom(CollectionUtils.union(referenced.refsFrom(), Set.of(key)))); + var curRefs = refCur.collectRefsTo(); + var oldRefs = refOld.collectRefsTo(); + + for (var curRef : curRefs) { + if (!oldRefs.contains(curRef)) { + var referenced = curTx.get(JDataRefcounted.class, curRef).orElse(null); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); + } } - for (var removedRef : CollectionUtils.subtract(refOld.collectRefsTo(), refCur.collectRefsTo())) { - var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null); - curTx.put(referenced.withRefsFrom(CollectionUtils.subtract(referenced.refsFrom(), Set.of(key)))); + for (var oldRef : oldRefs) { + if (!curRefs.contains(oldRef)) { + var referenced = curTx.get(JDataRefcounted.class, oldRef).orElse(null); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); + } } } @@ -38,7 +42,7 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var newRef : refCur.collectRefsTo()) { var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null); - curTx.put(referenced.withRefsFrom(CollectionUtils.union(referenced.refsFrom(), Set.of(key)))); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); } } @@ -48,10 +52,9 @@ public class RefcounterTxHook implements PreCommitTxHook { return; } - for (var removedRef : refCur.collectRefsTo()) { var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null); - curTx.put(referenced.withRefsFrom(CollectionUtils.subtract(referenced.refsFrom(), Set.of(key)))); + curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 55802cd6..d0a8029a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -12,6 +12,7 @@ import com.usatiuk.kleppmanntree.*; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; +import org.pcollections.TreePSet; import java.util.HashMap; import java.util.List; @@ -37,7 +38,7 @@ public class JKleppmannTreeManager { if (data == null) { data = new JKleppmannTreePersistentData( name, - List.of(), + TreePSet.empty(), true, 1L, new HashMap<>(), diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 82ee0fde..0801c62f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -4,6 +4,8 @@ import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; +import org.pcollections.PCollection; +import org.pcollections.TreePSet; import java.io.Serializable; import java.util.Collection; @@ -13,13 +15,13 @@ import java.util.UUID; import java.util.stream.Stream; // FIXME: Ideally this is two classes? -public record JKleppmannTreeNode(JObjectKey key, Collection refsFrom, boolean frozen, JObjectKey parent, +public record JKleppmannTreeNode(JObjectKey key, PCollection refsFrom, boolean frozen, JObjectKey parent, OpMove lastEffectiveOp, JKleppmannTreeNodeMeta meta, Map children) implements TreeNode, JDataRefcounted, Serializable { public JKleppmannTreeNode(JObjectKey id, JObjectKey parent, JKleppmannTreeNodeMeta meta) { - this(id, Collections.emptyList(), false, parent, null, meta, Collections.emptyMap()); + this(id, TreePSet.empty(), false, parent, null, meta, Collections.emptyMap()); } @Override @@ -43,7 +45,7 @@ public record JKleppmannTreeNode(JObjectKey key, Collection refsFrom } @Override - public JKleppmannTreeNode withRefsFrom(Collection refs) { + public JKleppmannTreeNode withRefsFrom(PCollection refs) { return new JKleppmannTreeNode(key, refs, frozen, parent, lastEffectiveOp, meta, children); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index 6422c69d..910a652b 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -1,15 +1,16 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; -import com.usatiuk.dhfs.objects.JObjectKey; +import org.pcollections.PCollection; import 
java.util.*; public record JKleppmannTreePersistentData( - JObjectKey key, Collection refsFrom, boolean frozen, + JObjectKey key, PCollection refsFrom, boolean frozen, long clock, HashMap, OpMove>> queues, HashMap peerTimestampLog, @@ -37,7 +38,7 @@ public record JKleppmannTreePersistentData( } @Override - public JKleppmannTreePersistentData withRefsFrom(Collection refs) { + public JKleppmannTreePersistentData withRefsFrom(PCollection refs) { return new JKleppmannTreePersistentData(key, refs, frozen, clock, queues, peerTimestampLog, log); } From dbc5230fb83a1aba91d170f36e175377666e10ef Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 12:24:39 +0100 Subject: [PATCH 049/105] run code cleanup --- .../deployment/AutoprotomapProcessor.java | 4 +- .../usatiuk/kleppmanntree/TestNodeMeta.java | 8 +- .../kleppmanntree/TestNodeMetaFile.java | 8 +- .../kleppmanntree/TestPeerInterface.java | 4 +- dhfs-parent/objects/pom.xml | 4 +- .../usatiuk/dhfs/objects/JObjectManager.java | 131 +++++++++--------- .../dhfs/objects/TransactionManagerImpl.java | 3 +- .../com/usatiuk/dhfs/objects/TxWriteback.java | 12 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 2 +- .../CachingObjectPersistentStore.java | 13 +- .../transaction/TransactionFactoryImpl.java | 21 ++- .../transaction/TransactionObject.java | 2 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 2 +- .../dhfs/objects/PreCommitTxHookTest.java | 9 +- .../files/service/DhfsFileServiceImpl.java | 5 +- .../dhfs/fuse/JnrPtrByteOutputAccessors.java | 14 +- .../jkleppmanntree/JKleppmannTreeManager.java | 7 +- .../JKleppmannTreeOpWrapper.java | 8 +- .../JKleppmannTreePeriodicPushOp.java | 12 +- .../structs/JKleppmannTreeNode.java | 2 +- .../structs/JKleppmannTreeNodeMeta.java | 8 +- .../structs/JKleppmannTreeNodeMetaFile.java | 8 +- .../com/usatiuk/dhfs/TempDataProfile.java | 3 +- .../usatiuk/dhfs/integration/DhfsImage.java | 5 +- dhfs-parent/utils/pom.xml | 4 +- .../com/usatiuk/dhfs/utils/DataLocker.java | 67 +++++---- .../utils/HashSetDelayedBlockingQueue.java | 21 ++- 27 files changed, 186 insertions(+), 201 deletions(-) diff --git a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java index 3c3b0809..d3e574eb 100644 --- a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java +++ b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/AutoprotomapProcessor.java @@ -68,11 +68,11 @@ class AutoprotomapProcessor { } } catch (Throwable e) { StringBuilder sb = new StringBuilder(); - sb.append(e.toString() + "\n"); + sb.append(e + "\n"); for (var el : e.getStackTrace()) { sb.append(el.toString() + "\n"); } - System.out.println(sb.toString()); + System.out.println(sb); } } } diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java index c02dd785..be276c9c 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMeta.java @@ -3,14 +3,14 @@ package com.usatiuk.kleppmanntree; public abstract class TestNodeMeta implements NodeMeta { private final String _name; + public TestNodeMeta(String name) { + _name = name; + } + @Override public String 
getName() { return _name; } - public TestNodeMeta(String name) { - _name = name; - } - abstract public NodeMeta withName(String name); } diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java index bb1bbec6..9cb0792f 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestNodeMetaFile.java @@ -3,15 +3,15 @@ package com.usatiuk.kleppmanntree; public class TestNodeMetaFile extends TestNodeMeta { private final long _inode; - public long getInode() { - return _inode; - } - public TestNodeMetaFile(String name, long inode) { super(name); _inode = inode; } + public long getInode() { + return _inode; + } + @Override public NodeMeta withName(String name) { return new TestNodeMetaFile(name, _inode); diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java index 3f793aab..708bb204 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestPeerInterface.java @@ -6,7 +6,9 @@ import java.util.List; public class TestPeerInterface implements PeerInterface { private final long selfId; - public TestPeerInterface(long selfId) {this.selfId = selfId;} + public TestPeerInterface(long selfId) { + this.selfId = selfId; + } @Override public Long getSelfId() { diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 20b09cae..66fe78ff 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -1,6 +1,6 @@ - 4.0.0 diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 8e781272..c3c2df63 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -22,40 +22,18 @@ import java.util.function.Function; // TODO: persistent tx id @ApplicationScoped public class JObjectManager { + private final List _preCommitTxHooks; + private final DataLocker _objLocker = new DataLocker(); + private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); + private final AtomicLong _txCounter = new AtomicLong(); @Inject WritebackObjectPersistentStore writebackObjectPersistentStore; @Inject TransactionFactory transactionFactory; - - private final List _preCommitTxHooks; - JObjectManager(Instance preCommitTxHooks) { _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); } - private final DataLocker _objLocker = new DataLocker(); - private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); - private final AtomicLong _txCounter = new AtomicLong(); - - private class JDataWrapper extends WeakReference> { - private static final Cleaner CLEANER = Cleaner.create(); - - public JDataWrapper(JDataVersionedWrapper referent) { - super(referent); - var key = referent.data().key(); - CLEANER.register(referent, () -> { - _objects.remove(key, this); - }); - } - - @Override - public String toString() { - return "JDataWrapper{" + - "ref=" + get() + - '}'; - } - } - private JDataVersionedWrapper 
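// (Cache note: _objects holds JDataWrapper weak references; each referent
// registers a Cleaner action that evicts its own entry once collected,
//   CLEANER.register(referent, () -> _objects.remove(key, this));
// so the map self-cleans and never pins objects in memory.)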
get(Class type, JObjectKey key) { while (true) { { @@ -93,16 +71,6 @@ public class JObjectManager { } } - private record TransactionObjectNoLock - (Optional> data) - implements TransactionObject { - } - - private record TransactionObjectLocked - (Optional> data, AutoCloseableNoThrow lock) - implements TransactionObject { - } - private TransactionObjectNoLock getObj(Class type, JObjectKey key) { var got = get(type, key); return new TransactionObjectNoLock<>(Optional.ofNullable(got)); @@ -114,37 +82,6 @@ public class JObjectManager { return new TransactionObjectLocked<>(Optional.ofNullable(got), lock); } - private class TransactionObjectSourceImpl implements TransactionObjectSource { - private final long _txId; - - private TransactionObjectSourceImpl(long txId) { - _txId = txId; - } - - @Override - public TransactionObject get(Class type, JObjectKey key) { - return getObj(type, key); -// return getObj(type, key).map(got -> { -// if (got.data().getVersion() > _txId) { -// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); -// } -// return got; -// }); - } - - @Override - public TransactionObject getWriteLocked(Class type, JObjectKey key) { - return getObjLock(type, key); -// return getObjLock(type, key).map(got -> { -// if (got.data().getVersion() > _txId) { -// got.lock.close(); -// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); -// } -// return got; -// }); - } - } - public TransactionPrivate createTransaction() { var counter = _txCounter.getAndIncrement(); Log.trace("Creating transaction " + counter); @@ -292,4 +229,64 @@ public class JObjectManager { } }); } + + private record TransactionObjectNoLock + (Optional> data) + implements TransactionObject { + } + + private record TransactionObjectLocked + (Optional> data, AutoCloseableNoThrow lock) + implements TransactionObject { + } + + private class JDataWrapper extends WeakReference> { + private static final Cleaner CLEANER = Cleaner.create(); + + public JDataWrapper(JDataVersionedWrapper referent) { + super(referent); + var key = referent.data().key(); + CLEANER.register(referent, () -> { + _objects.remove(key, this); + }); + } + + @Override + public String toString() { + return "JDataWrapper{" + + "ref=" + get() + + '}'; + } + } + + private class TransactionObjectSourceImpl implements TransactionObjectSource { + private final long _txId; + + private TransactionObjectSourceImpl(long txId) { + _txId = txId; + } + + @Override + public TransactionObject get(Class type, JObjectKey key) { + return getObj(type, key); +// return getObj(type, key).map(got -> { +// if (got.data().getVersion() > _txId) { +// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); +// } +// return got; +// }); + } + + @Override + public TransactionObject getWriteLocked(Class type, JObjectKey key) { + return getObjLock(type, key); +// return getObjLock(type, key).map(got -> { +// if (got.data().getVersion() > _txId) { +// got.lock.close(); +// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); +// } +// return got; +// }); + } + } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index 0dacf744..d8bbf6a4 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -8,11 +8,10 @@ import jakarta.inject.Inject; @ApplicationScoped public class TransactionManagerImpl implements TransactionManager { + private static final ThreadLocal _currentTransaction = new ThreadLocal<>(); @Inject JObjectManager jObjectManager; - private static final ThreadLocal _currentTransaction = new ThreadLocal<>(); - @Override public void begin() { if (_currentTransaction.get() != null) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 92305bf1..66138d60 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -13,6 +13,12 @@ public interface TxWriteback { void fence(long bundleId); + Optional getPendingWrite(JObjectKey key); + + // Executes callback after bundle with bundleId id has been persisted + // if it was already, runs callback on the caller thread + void asyncFence(long bundleId, VoidFn callback); + interface PendingWriteEntry { long bundleId(); } @@ -22,10 +28,4 @@ public interface TxWriteback { record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { } - - Optional getPendingWrite(JObjectKey key); - - // Executes callback after bundle with bundleId id has been persisted - // if it was already, runs callback on the caller thread - void asyncFence(long bundleId, VoidFn callback); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 42f4fb23..c603a61b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -163,7 +163,7 @@ public class TxWritebackImpl implements TxWriteback { callbacks.forEach(l -> l.forEach(VoidFn::apply)); synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundleImpl) bundle).calculateTotalSize(); + currentSize -= bundle.calculateTotalSize(); // FIXME: if (currentSize <= sizeLimit || !_ready) _flushWaitSynchronizer.notifyAll(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 2aede5fc..fa6f8799 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -19,14 +19,10 @@ import java.util.stream.Stream; @ApplicationScoped public class CachingObjectPersistentStore { + private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); + private final DataLocker _locker = new DataLocker(); @Inject SerializingObjectPersistentStore delegate; - - private record CacheEntry(Optional> object, long size) { - } - - private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); - @ConfigProperty(name = "dhfs.objects.lru.limit") long sizeLimit; @ConfigProperty(name = "dhfs.objects.lru.print-stats") @@ -37,8 +33,6 @@ public class CachingObjectPersistentStore { private ExecutorService _statusExecutor 
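// (Concurrency note: with the access-ordered map from the cache fixes,
// new LinkedHashMap<>(8, 0.75f, true), even get() relinks an entry and is a
// structural modification, which is why every cache access, reads included,
// happens inside synchronized (_cache).)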
= null; - private final DataLocker _locker = new DataLocker(); - @Startup void init() { if (printStats) { @@ -110,4 +104,7 @@ public class CachingObjectPersistentStore { } delegate.commitTx(names); } + + private record CacheEntry(Optional> object, long size) { + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index bc1217bd..5cef472d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -12,23 +12,25 @@ import java.util.Optional; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { + @Override + public TransactionPrivate createTransaction(long id, TransactionObjectSource source) { + return new TransactionImpl(id, source); + } + private class TransactionImpl implements TransactionPrivate { private final long _id; - - public long getId() { - return _id; - } - private final ReadTrackingObjectSource _source; - private final Map> _writes = new HashMap<>(); private Map> _newWrites = new HashMap<>(); - private TransactionImpl(long id, TransactionObjectSource source) { _id = id; _source = new ReadTrackingObjectSource(source); } + public long getId() { + return _id; + } + @Override public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { switch (_writes.get(key)) { @@ -97,9 +99,4 @@ public class TransactionFactoryImpl implements TransactionFactory { } } - @Override - public TransactionPrivate createTransaction(long id, TransactionObjectSource source) { - return new TransactionImpl(id, source); - } - } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java index fdf01178..5404245a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import java.util.Optional; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 2483703c..e5d3e83d 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -250,7 +250,7 @@ public class ObjectsTest { Assertions.assertEquals("John", got.name()); } } else { - Assertions.assertTrue(!thread2Failed.get()); + Assertions.assertFalse(thread2Failed.get()); Assertions.assertEquals("John2", got.name()); } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 850548d7..0598e61e 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -18,11 +18,6 @@ public class PreCommitTxHookTest { @Inject 
Transaction curTx; - - @ApplicationScoped - public static class DummyPreCommitTxHook implements PreCommitTxHook { - } - @InjectSpy private DummyPreCommitTxHook spyHook; @@ -111,4 +106,8 @@ public class PreCommitTxHookTest { Assertions.assertEquals(new JObjectKey("ParentEdit2"), keyCaptor.getValue()); } + @ApplicationScoped + public static class DummyPreCommitTxHook implements PreCommitTxHook { + } + } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index baa9537b..e1056659 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -160,8 +160,6 @@ public class DhfsFileServiceImpl implements DhfsFileService { } catch (Exception e) { // fobj.getMeta().removeRef(newNodeId); throw e; - } finally { -// fobj.rwUnlock(); } return Optional.of(f.key()); }); @@ -173,8 +171,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { return jObjectTxManager.executeTx(() -> { return getTree().findParent(w -> { if (w.meta() instanceof JKleppmannTreeNodeMetaFile f) - if (f.getFileIno().equals(ino)) - return true; + return f.getFileIno().equals(ino); return false; }); }); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java index 98da17fa..6ec005d7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java @@ -12,6 +12,13 @@ class JnrPtrByteOutputAccessors { JavaNioAccess _nioAccess; Unsafe _unsafe; + JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { + _nioAccess = SharedSecrets.getJavaNioAccess(); + Field f = Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + _unsafe = (Unsafe) f.get(null); + } + public JavaNioAccess getNioAccess() { return _nioAccess; } @@ -19,11 +26,4 @@ class JnrPtrByteOutputAccessors { public Unsafe getUnsafe() { return _unsafe; } - - JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { - _nioAccess = SharedSecrets.getJavaNioAccess(); - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - _unsafe = (Unsafe) f.get(null); - } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index d0a8029a..ecdc816c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -58,13 +58,10 @@ public class JKleppmannTreeManager { public class JKleppmannTree { private final KleppmannTree _tree; - - private JKleppmannTreePersistentData _data; - private final JKleppmannTreeStorageInterface _storageInterface; private final JKleppmannTreeClock _clock; - private final JObjectKey _treeName; + private JKleppmannTreePersistentData _data; JKleppmannTree(JKleppmannTreePersistentData data) { _treeName = data.key(); @@ -414,7 +411,7 @@ public class JKleppmannTreeManager { @Override public long size() { - return (long) _data.log().size(); + 
return _data.log().size(); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index cf5c8ce9..cf734a4e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -10,15 +10,15 @@ import java.util.UUID; public class JKleppmannTreeOpWrapper { private final OpMove _op; - public OpMove getOp() { - return _op; - } - public JKleppmannTreeOpWrapper(OpMove op) { if (op == null) throw new IllegalArgumentException("op shouldn't be null"); _op = op; } + public OpMove getOp() { + return _op; + } + // @Override // public Collection getEscapedRefs() { // if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java index e2e4f8c2..f7526587 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -2,10 +2,15 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import java.util.UUID; -public class JKleppmannTreePeriodicPushOp { +public class JKleppmannTreePeriodicPushOp { private final UUID _from; private final long _timestamp; + public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { + _from = from; + _timestamp = timestamp; + } + public UUID getFrom() { return _from; } @@ -14,11 +19,6 @@ public class JKleppmannTreePeriodicPushOp { return _timestamp; } - public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { - _from = from; - _timestamp = timestamp; - } - // @Override // public Collection getEscapedRefs() { // return List.of(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 0801c62f..4fda0648 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -59,7 +59,7 @@ public record JKleppmannTreeNode(JObjectKey key, PCollection refsFro return Stream.concat(children().values().stream(), switch (meta()) { case JKleppmannTreeNodeMetaDirectory dir -> Stream.of(); - case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno()); + case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno()); default -> throw new IllegalStateException("Unexpected value: " + meta()); } ).toList(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java index 4a8f9fa6..a7171ee8 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -10,14 +10,14 @@ import 
java.util.Objects; public abstract class JKleppmannTreeNodeMeta implements NodeMeta { private final String _name; - public String getName() { - return _name; - } - public JKleppmannTreeNodeMeta(String name) { _name = name; } + public String getName() { + return _name; + } + public abstract JKleppmannTreeNodeMeta withName(String name); @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index 563a2447..5c1ff8f1 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -10,15 +10,15 @@ import java.util.Objects; public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { private final JObjectKey _fileIno; - public JObjectKey getFileIno() { - return _fileIno; - } - public JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) { super(name); _fileIno = fileIno; } + public JObjectKey getFileIno() { + return _fileIno; + } + @Override public JKleppmannTreeNodeMeta withName(String name) { return new JKleppmannTreeNodeMetaFile(name, _fileIno); diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java index 03f74be5..8b075b57 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java @@ -9,7 +9,8 @@ import java.util.HashMap; import java.util.Map; abstract public class TempDataProfile implements QuarkusTestProfile { - protected void getConfigOverrides(Map toPut) {} + protected void getConfigOverrides(Map toPut) { + } @Override final public Map getConfigOverrides() { diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java index 5bec10e9..12d30e28 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java @@ -12,10 +12,11 @@ import java.util.concurrent.TimeoutException; public class DhfsImage implements Future { + private static final DhfsImage INSTANCE = new DhfsImage(); private static String _builtImage = null; - private static DhfsImage INSTANCE = new DhfsImage(); - private DhfsImage() {} + private DhfsImage() { + } public static DhfsImage getInstance() { return INSTANCE; diff --git a/dhfs-parent/utils/pom.xml b/dhfs-parent/utils/pom.xml index 7b67e6f1..4e59f908 100644 --- a/dhfs-parent/utils/pom.xml +++ b/dhfs-parent/utils/pom.xml @@ -1,6 +1,6 @@ - 4.0.0 diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java index 8a8fb89f..5648e03e 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java @@ -6,42 +6,9 @@ import java.lang.ref.Cleaner; import java.util.concurrent.ConcurrentHashMap; public class DataLocker { - private static class LockTag { - boolean released = false; - final Thread owner = Thread.currentThread(); - } - - private final ConcurrentHashMap _locks = new 
ConcurrentHashMap<>(); - - private class Lock implements AutoCloseableNoThrow { - private final Object _key; - private final LockTag _tag; - private static final Cleaner CLEANER = Cleaner.create(); - - public Lock(Object key, LockTag tag) { - _key = key; - _tag = tag; - CLEANER.register(this, () -> { - if (!tag.released) { - Log.error("Lock collected without release: " + key); - } - }); - } - - @Override - public void close() { - synchronized (_tag) { - _tag.released = true; - // Notify all because when the object is locked again, - // it's a different lock tag - _tag.notifyAll(); - _locks.remove(_key, _tag); - } - } - } - private static final AutoCloseableNoThrow DUMMY_LOCK = () -> { }; + private final ConcurrentHashMap _locks = new ConcurrentHashMap<>(); public AutoCloseableNoThrow lock(Object data) { while (true) { @@ -69,4 +36,36 @@ public class DataLocker { } } + private static class LockTag { + final Thread owner = Thread.currentThread(); + boolean released = false; + } + + private class Lock implements AutoCloseableNoThrow { + private static final Cleaner CLEANER = Cleaner.create(); + private final Object _key; + private final LockTag _tag; + + public Lock(Object key, LockTag tag) { + _key = key; + _tag = tag; + CLEANER.register(this, () -> { + if (!tag.released) { + Log.error("Lock collected without release: " + key); + } + }); + } + + @Override + public void close() { + synchronized (_tag) { + _tag.released = true; + // Notify all because when the object is locked again, + // it's a different lock tag + _tag.notifyAll(); + _locks.remove(_key, _tag); + } + } + } + } diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java index ce337297..e37aa9ea 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/HashSetDelayedBlockingQueue.java @@ -11,15 +11,21 @@ public class HashSetDelayedBlockingQueue { private final LinkedHashMap> _set = new LinkedHashMap<>(); private final Object _sleepSynchronizer = new Object(); private long _delay; + private boolean _closed = false; + + public HashSetDelayedBlockingQueue(long delay) { + _delay = delay; + } public long getDelay() { return _delay; } - private boolean _closed = false; - - public HashSetDelayedBlockingQueue(long delay) { - _delay = delay; + public void setDelay(long delay) { + synchronized (_sleepSynchronizer) { + _delay = delay; + _sleepSynchronizer.notifyAll(); + } } // If there's object with key in the queue, don't do anything @@ -252,13 +258,6 @@ public class HashSetDelayedBlockingQueue { return out; } - public void setDelay(long delay) { - synchronized (_sleepSynchronizer) { - _delay = delay; - _sleepSynchronizer.notifyAll(); - } - } - private record SetElement(T el, long time) { } } From e34225eb0ae35b6ca35ec4416cd9c3f0c929ca44 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 3 Jan 2025 12:35:36 +0100 Subject: [PATCH 050/105] don't use contains() on a List Still kinda slow, but oh well. --- .../src/main/java/com/usatiuk/dhfs/files/objects/File.java | 3 ++- .../objects/jkleppmanntree/structs/JKleppmannTreeNode.java | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index fb63b29d..d6012ef9 100644 ---
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -5,6 +5,7 @@ import org.pcollections.PCollection; import org.pcollections.TreePMap; import java.util.Collection; +import java.util.Set; public record File(JObjectKey key, PCollection refsFrom, boolean frozen, long mode, long cTime, long mTime, @@ -46,6 +47,6 @@ public record File(JObjectKey key, PCollection refsFrom, boolean fro @Override public Collection collectRefsTo() { - return chunks().values().stream().toList(); + return Set.copyOf(chunks().values()); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 4fda0648..890b13ea 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -12,6 +12,7 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; import java.util.stream.Stream; // FIXME: Ideally this is two classes? @@ -62,6 +63,6 @@ public record JKleppmannTreeNode(JObjectKey key, PCollection refsFro case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno()); default -> throw new IllegalStateException("Unexpected value: " + meta()); } - ).toList(); + ).collect(Collectors.toUnmodifiableSet()); } } From 55ea9ddc44cfb9d079804046b6d5987607eb2499 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 28 Jan 2025 07:53:45 +0100 Subject: [PATCH 051/105] simple peer connectivity --- .../src/main/resources/application.properties | 1 - .../com/usatiuk/dhfs/ShutdownChecker.java | 2 +- .../com/usatiuk/dhfs/objects/JDataRemote.java | 4 + .../java/com/usatiuk/dhfs/objects/PeerId.java | 23 ++ .../usatiuk/dhfs/objects/RemoteObject.java | 67 +++++ .../dhfs/objects/RemoteTransaction.java | 31 +++ .../structs/JKleppmannTreeNode.java | 2 + .../structs/JKleppmannTreeNodeMeta.java | 2 +- .../JKleppmannTreeNodeMetaDirectory.java | 2 +- .../structs/JKleppmannTreeNodeMetaFile.java | 2 +- .../objects/repository/CertificateTools.java | 63 +++++ .../dhfs/objects/repository/PeerManager.java | 233 ++++++++++++++++++ .../repository/PersistentPeerDataService.java | 179 ++++++++++++++ .../repository/PersistentRemoteHostsData.java | 25 ++ .../repository/RemoteObjectServiceClient.java | 174 +++++++++++++ .../repository/RemoteObjectServiceServer.java | 191 ++++++++++++++ .../objects/repository/RpcChannelFactory.java | 44 ++++ .../objects/repository/RpcClientFactory.java | 94 +++++++ .../dhfs/objects/repository/SyncHandler.java | 207 ++++++++++++++++ .../peerdiscovery/IpPeerAddress.java | 9 + .../repository/peerdiscovery/PeerAddress.java | 8 + .../peerdiscovery/PeerAddressType.java | 7 + .../peerdiscovery/PeerDiscoveryDirectory.java | 70 ++++++ .../peerdiscovery/StaticPeerDiscovery.java | 46 ++++ .../local/LocalPeerDiscoveryBroadcaster.java | 102 ++++++++ .../local/LocalPeerDiscoveryClient.java | 91 +++++++ .../objects/repository/peersync/PeerInfo.java | 37 +++ .../repository/peersync/PeerInfoService.java | 76 ++++++ .../repository/peersync/api/ApiPeerInfo.java | 4 + .../repository/peersync/api/PeerSyncApi.java | 26 ++ .../peersync/api/PeerSyncApiClient.java | 11 + .../api/PeerSyncApiClientDynamic.java | 28 +++ 
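[Editor's note on PATCH 050 above: the change from toList() to Set.copyOf / toUnmodifiableSet matters because collectRefsTo() results are presumably probed with contains(), which is an O(n) scan on a List but a hash lookup on a Set. A minimal standalone illustration, with made-up ref names:

import java.util.List;
import java.util.Set;

public class RefLookupDemo {
    public static void main(String[] args) {
        List<String> refs = List.of("chunk-1", "chunk-2", "chunk-3");
        // Set.copyOf builds an immutable hash-based set once; contains() then
        // costs ~O(1) instead of the O(n) scan List.contains performs per probe.
        Set<String> refsSet = Set.copyOf(refs);
        System.out.println(refs.contains("chunk-3"));    // linear scan
        System.out.println(refsSet.contains("chunk-3")); // hash lookup
    }
}

The commit body's "still kinda slow" likely refers to the copy itself, which remains O(n) per call; the win is on repeated membership checks.]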
.../structs/JKleppmannTreeNodeMetaPeer.java | 41 +++ .../peertrust/PeerRolesAugmentor.java | 51 ++++ .../peertrust/PeerTrustManager.java | 69 ++++++ .../peertrust/PeerTrustServerCustomizer.java | 44 ++++ .../repository/webapi/AvailablePeerInfo.java | 4 + .../repository/webapi/KnownPeerDelete.java | 4 + .../repository/webapi/KnownPeerInfo.java | 4 + .../repository/webapi/KnownPeerPut.java | 4 + .../repository/webapi/ManagementApi.java | 45 ++++ .../src/main/resources/application.properties | 4 +- .../com/usatiuk/dhfs/TempDataProfile.java | 1 - .../com/usatiuk/dhfs/TestDataCleaner.java | 4 - webui/src/api/dto.ts | 4 +- 45 files changed, 2127 insertions(+), 13 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java create mode 100644 
dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties index 24bd7282..f7842d0c 100644 --- a/dhfs-parent/objects/src/main/resources/application.properties +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -4,5 +4,4 @@ dhfs.objects.lru.limit=134217728 dhfs.objects.lru.print-stats=true dhfs.objects.lock_timeout_secs=15 dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs -dhfs.objects.root=${HOME}/dhfs_default/data/stuff quarkus.package.jar.decompiler.enabled=true diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java index dcd379a8..7074af8d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java @@ -14,7 +14,7 @@ import java.nio.file.Paths; @ApplicationScoped public class ShutdownChecker { private static final String dataFileName = "running"; - @ConfigProperty(name = "dhfs.objects.root") + @ConfigProperty(name = "dhfs.objects.persistence.files.root") String dataRoot; boolean _cleanShutdown = true; boolean _initialized = false; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java new file mode 100644 index 00000000..4386f03e --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects; + +public interface JDataRemote { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java new file mode 100644 index 00000000..5c34de0e --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java @@ -0,0 +1,23 @@ +package com.usatiuk.dhfs.objects; + +import java.io.Serializable; +import java.util.UUID; + +public record PeerId(UUID id) implements Serializable { + public static PeerId of(UUID id) { + return new PeerId(id); + } + + public static PeerId of(String id) { + return 
new PeerId(UUID.fromString(id)); + } + + @Override + public String toString() { + return id.toString(); + } + + public JObjectKey toJObjectKey() { + return JObjectKey.of(id.toString()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java new file mode 100644 index 00000000..46719854 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java @@ -0,0 +1,67 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.PCollection; +import org.pcollections.PMap; +import org.pcollections.PSet; + +import java.util.Collection; +import java.util.List; + +public record RemoteObject( + JObjectKey key, PCollection refsFrom, boolean frozen, + PMap knownRemoteVersions, + Class knownType, + PSet confirmedDeletes, + boolean seen, + PMap changelog, + boolean haveLocal +) implements JDataRefcounted { + @Override + public RemoteObject withRefsFrom(PCollection refs) { + return new RemoteObject<>(key, refs, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + @Override + public RemoteObject withFrozen(boolean frozen) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withKnownRemoteVersions(PMap knownRemoteVersions) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withKnownType(Class knownType) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withConfirmedDeletes(PSet confirmedDeletes) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withSeen(boolean seen) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withChangelog(PMap changelog) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public RemoteObject withHaveLocal(boolean haveLocal) { + return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + } + + public static JObjectKey keyFrom(JObjectKey key) { + return new JObjectKey(key + "_remote"); + } + + public JObjectKey localKey() { + if (!haveLocal) throw new IllegalStateException("No local key"); + return JObjectKey.of(key.name().substring(0, key.name().length() - "_remote".length())); + } + + @Override + public Collection collectRefsTo() { + if (haveLocal) return List.of(localKey()); + return List.of(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java new file mode 100644 index 00000000..b5086e4a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -0,0 +1,31 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.LockingStrategy; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import 
org.apache.commons.lang3.NotImplementedException; + +import java.util.Optional; + +@ApplicationScoped +public class RemoteTransaction { + @Inject + Transaction curTx; + + public long getId() { + return curTx.getId(); + } + + public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { + throw new NotImplementedException(); + } + + public void put(JData obj) { + throw new NotImplementedException(); + } + + public Optional get(Class type, JObjectKey key) { + return get(type, key, LockingStrategy.OPTIMISTIC); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 890b13ea..3b4a8687 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; import org.pcollections.PCollection; @@ -61,6 +62,7 @@ public record JKleppmannTreeNode(JObjectKey key, PCollection refsFro switch (meta()) { case JKleppmannTreeNodeMetaDirectory dir -> Stream.of(); case JKleppmannTreeNodeMetaFile file -> Stream.of(file.getFileIno()); + case JKleppmannTreeNodeMetaPeer peer -> Stream.of(peer.getPeerId()); default -> throw new IllegalStateException("Unexpected value: " + meta()); } ).collect(Collectors.toUnmodifiableSet()); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java index a7171ee8..fb4e5311 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -6,7 +6,7 @@ import com.usatiuk.kleppmanntree.NodeMeta; import java.util.Objects; -@ProtoMirror(JKleppmannTreeNodeMetaP.class) +//@ProtoMirror(JKleppmannTreeNodeMetaP.class) public abstract class JKleppmannTreeNodeMeta implements NodeMeta { private final String _name; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java index 79882017..39ebd488 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP; -@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class) +//@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class) public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta { public JKleppmannTreeNodeMetaDirectory(String name) 
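[Editor's note: the collectRefsTo() switch above now dispatches over three meta types (directory, file, and the new peer variant). A reduced, self-contained sketch of the same shape; the stand-in types here are sealed, which is an assumption of the sketch and is exactly why it can omit the "default -> throw" branch the real, open JKleppmannTreeNodeMeta hierarchy needs:

import java.util.List;

// Stand-in types; sealing them is an assumption of this sketch.
sealed interface Meta permits DirMeta, FileMeta, PeerMeta {}
record DirMeta() implements Meta {}
record FileMeta(String fileIno) implements Meta {}
record PeerMeta(String peerId) implements Meta {}

class RefsFromMeta {
    static List<String> extraRefs(Meta meta) {
        return switch (meta) {
            case DirMeta d -> List.of();             // directories add no refs
            case FileMeta f -> List.of(f.fileIno()); // file nodes reference their inode object
            case PeerMeta p -> List.of(p.peerId());  // peer nodes reference the peer object
        };
    }
}]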
{ super(name); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index 5c1ff8f1..b51af59a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -6,7 +6,7 @@ import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; import java.util.Objects; -@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) +//@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { private final JObjectKey _fileIno; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java new file mode 100644 index 00000000..fcb5a07e --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java @@ -0,0 +1,63 @@ +package com.usatiuk.dhfs.objects.repository; + +import org.apache.commons.codec.digest.DigestUtils; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.x500.X500Name; +import org.bouncycastle.asn1.x509.BasicConstraints; +import org.bouncycastle.cert.CertIOException; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.operator.ContentSigner; +import org.bouncycastle.operator.OperatorCreationException; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.math.BigInteger; +import java.security.*; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.Calendar; +import java.util.Date; + +public class CertificateTools { + + public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException { + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + InputStream in = new ByteArrayInputStream(bytes); + return (X509Certificate) certFactory.generateCertificate(in); + } + + public static KeyPair generateKeyPair() throws NoSuchAlgorithmException { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(2048); //FIXME: + return keyGen.generateKeyPair(); + } + + public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException { + Provider bcProvider = new BouncyCastleProvider(); + Security.addProvider(bcProvider); + + Date startDate = new Date(); + + X500Name cnName = new X500Name("CN=" + subject); + BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject)); + + Calendar calendar = Calendar.getInstance(); + calendar.setTime(startDate); + calendar.add(Calendar.YEAR, 999); + + Date endDate = calendar.getTime(); + + ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate()); + + JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, 
startDate, endDate, cnName, keyPair.getPublic()); + + BasicConstraints basicConstraints = new BasicConstraints(false); + certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints); + + return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java new file mode 100644 index 00000000..81f3c764 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -0,0 +1,233 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryDirectory; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import com.usatiuk.dhfs.objects.repository.peersync.api.PeerSyncApiClientDynamic; +import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; +import com.usatiuk.dhfs.objects.repository.webapi.AvailablePeerInfo; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import io.quarkus.scheduler.Scheduled; +import io.smallrye.common.annotation.Blocking; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.*; + +@ApplicationScoped +public class PeerManager { + private final ConcurrentMap _states = new ConcurrentHashMap<>(); + // FIXME: Ideally not call them on every ping + private final ArrayList _connectedListeners = new ArrayList<>(); + private final ArrayList _disconnectedListeners = new ArrayList<>(); + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + PeerInfoService peerInfoService; + @Inject + RpcClientFactory rpcClientFactory; + @Inject + PeerSyncApiClientDynamic peerSyncApiClient; + @Inject + TransactionManager transactionManager; + @Inject + Transaction curTx; + @Inject + PeerTrustManager peerTrustManager; + @ConfigProperty(name = "dhfs.objects.sync.ping.timeout") + long pingTimeout; + @Inject + PeerDiscoveryDirectory peerDiscoveryDirectory; + private ExecutorService _heartbeatExecutor; + + // Note: keep priority updated with below + void init(@Observes @Priority(600) StartupEvent event) throws IOException { + _heartbeatExecutor = Executors.newVirtualThreadPerTaskExecutor(); + } + + @Scheduled(every = "${dhfs.objects.reconnect_interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + @Blocking + public void tryConnectAll() { + if (_heartbeatExecutor == null) return; + try { + _heartbeatExecutor.invokeAll(peerInfoService.getPeersNoSelf() + .stream() + .>map(host -> () -> { + try { + if (isReachable(host)) + Log.trace("Heartbeat: " + host); + else + Log.debug("Trying to connect to " + host); + var bestAddr = selectBestAddress(host.id()); + if (pingCheck(host, bestAddr)) + handleConnectionSuccess(host, bestAddr); + else + handleConnectionError(host); + } catch (Exception e) { + Log.error("Failed to connect to 
" + host, e); + } + return null; + }).toList(), 30, TimeUnit.SECONDS); //FIXME: + } catch (InterruptedException iex) { + Log.error("Heartbeat was interrupted"); + } + } + + // Note: registrations should be completed with Priority < 600 + public void registerConnectEventListener(ConnectionEventListener listener) { + synchronized (_connectedListeners) { + _connectedListeners.add(listener); + } + } + + // Note: registrations should be completed with Priority < 600 + public void registerDisconnectEventListener(ConnectionEventListener listener) { + synchronized (_disconnectedListeners) { + _disconnectedListeners.add(listener); + } + } + + private void handleConnectionSuccess(PeerInfo host, PeerAddress address) { + boolean wasReachable = isReachable(host); + +// boolean shouldSyncObj = persistentPeerDataService.markInitialObjSyncDone(host); +// boolean shouldSyncOp = persistentPeerDataService.markInitialOpSyncDone(host); +// +// if (shouldSyncObj) +// syncHandler.pushInitialResyncObj(host); +// if (shouldSyncOp) +// syncHandler.pushInitialResyncOp(host); + + _states.put(host.id(), address); + + if (wasReachable) return; + + Log.info("Connected to " + host); + +// for (var l : _connectedListeners) { +// l.apply(host); +// } + } + + public void handleConnectionError(PeerInfo host) { + boolean wasReachable = isReachable(host); + + if (wasReachable) + Log.info("Lost connection to " + host); + + _states.remove(host.id()); + +// for (var l : _disconnectedListeners) { +// l.apply(host); +// } + } + + // FIXME: + private boolean pingCheck(PeerInfo host, PeerAddress address) { + try { + return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, c -> { + var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + if (!UUID.fromString(ret.getSelfUuid()).equals(host.id().id())) { + throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host.id()); + } + return true; + }); + } catch (Exception ignored) { + Log.debug("Host " + host + " is unreachable: " + ignored.getMessage() + " " + ignored.getCause()); + return false; + } + } + + public boolean isReachable(PeerInfo host) { + return _states.containsKey(host.id()); + } + + public PeerAddress getAddress(PeerId host) { + return _states.get(host); + } + + public List getAvailableHosts() { + return _states.keySet().stream().toList(); + } + +// public List getUnavailableHosts() { +// return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() +// .filter(e -> !e.getValue().isReachable()) +// .map(Map.Entry::getKey).toList()); +// } + +// public HostStateSnapshot getHostStateSnapshot() { +// ArrayList available = new ArrayList<>(); +// ArrayList unavailable = new ArrayList<>(); +// _transientPeersState.runReadLocked(d -> { +// for (var v : d.getStates().entrySet()) { +// if (v.getValue().isReachable()) +// available.add(v.getKey()); +// else +// unavailable.add(v.getKey()); +// } +// return null; +// } +// ); +// return new HostStateSnapshot(available, unavailable); +// } + +// public void removeRemoteHost(UUID host) { +// persistentPeerDataService.removeHost(host); +// // Race? 
+// _transientPeersState.runWriteLocked(d -> { +// d.getStates().remove(host); +// return null; +// }); +// } + + private PeerAddress selectBestAddress(PeerId host) { + return peerDiscoveryDirectory.getForPeer(host).stream().findFirst().orElseThrow(); + } + + public void addRemoteHost(PeerId host) { + if (_states.containsKey(host)) { + throw new IllegalStateException("Host " + host + " is already added"); + } + + transactionManager.run(() -> { + if (peerInfoService.getPeerInfo(host).isPresent()) + throw new IllegalStateException("Host " + host + " is already added"); + + var info = peerSyncApiClient.getSelfInfo(selectBestAddress(host)); + + var cert = Base64.getDecoder().decode(info.cert()); + peerInfoService.putPeer(host, cert); + }); + + peerTrustManager.reloadTrustManagerHosts( + transactionManager.run(() -> peerInfoService.getPeers().stream().toList()) + ); //FIXME: + } + + public Collection getSeenButNotAddedHosts() { + return transactionManager.run(() -> { + return peerDiscoveryDirectory.getReachablePeers().stream().filter(p -> !peerInfoService.getPeerInfo(p).isPresent()) + .map(p -> new AvailablePeerInfo(p.toString())).toList(); + }); + } + + @FunctionalInterface + public interface ConnectionEventListener { + void apply(UUID host); + } + + public record HostStateSnapshot(List available, List unavailable) { + } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java new file mode 100644 index 00000000..523aa7cc --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -0,0 +1,179 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.ShutdownChecker; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutorService; + +@ApplicationScoped +public class PersistentPeerDataService { + @Inject + PeerTrustManager peerTrustManager; + @Inject + ExecutorService executorService; + @Inject + RpcClientFactory rpcClientFactory; + @Inject + ShutdownChecker shutdownChecker; + @Inject + TransactionManager jObjectTxManager; + @Inject + Transaction curTx; + @Inject + PeerInfoService peerInfoService; + + @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid") + Optional presetUuid; + + private PeerId _selfUuid; + private X509Certificate _selfCertificate; + private KeyPair _selfKeyPair; + + void init(@Observes @Priority(300) StartupEvent event) throws IOException { + jObjectTxManager.run(() -> { + var selfData = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (selfData != null) { + _selfUuid = selfData.selfUuid(); + _selfCertificate = selfData.selfCertificate(); + _selfKeyPair = 
selfData.selfKeyPair(); + return; + } else { + _selfUuid = presetUuid.map(s -> PeerId.of(UUID.fromString(s))).orElseGet(() -> PeerId.of(UUID.randomUUID())); + try { + Log.info("Generating a key pair, please wait"); + _selfKeyPair = CertificateTools.generateKeyPair(); + _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); + + curTx.put(new PersistentRemoteHostsData(_selfUuid, 0, _selfCertificate, _selfKeyPair)); + peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }); + peerTrustManager.reloadTrustManagerHosts(peerInfoService.getPeers()); + Log.info("Self uuid is: " + _selfUuid.toString()); + } + +// private void pushPeerUpdates() { +// pushPeerUpdates(null); +// } + +// private void pushPeerUpdates(@Nullable JObject obj) { +// if (obj != null) +// Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated"); +// executorService.submit(() -> { +// updateCerts(); +// invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName); +// for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList())) +// invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p)); +// }); +// } + + public PeerId getSelfUuid() { + return _selfUuid; + } + + public long getUniqueId() { + return jObjectTxManager.run(() -> { + var curData = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + curTx.put(curData.withSelfCounter(curData.selfCounter() + 1)); + return curData.selfCounter(); + }); + } + +// private void updateCerts() { +// try { +// peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls()); +// // Fixme:? I don't think it should be needed with custom trust store +// // but it doesn't work? 
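[Editor's note: the identity bootstrap in init() above reduces to two CertificateTools calls when no persisted self-data exists. A minimal usage sketch against the API defined in this patch, assuming CertificateTools and its BouncyCastle dependency are on the classpath:

import com.usatiuk.dhfs.objects.repository.CertificateTools;

import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.UUID;

public class CertBootstrapDemo {
    public static void main(String[] args) throws Exception {
        // Same two calls init() makes on first startup.
        KeyPair kp = CertificateTools.generateKeyPair();
        X509Certificate cert = CertificateTools.generateCertificate(kp, UUID.randomUUID().toString());
        // Peers exchange the DER encoding; certFromBytes() restores it.
        X509Certificate parsed = CertificateTools.certFromBytes(cert.getEncoded());
        System.out.println(parsed.getSubjectX500Principal()); // CN=<uuid>
    }
}]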
+// rpcClientFactory.dropCache(); +// return null; +// }); +// } catch (Exception ex) { +// Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage()); +// pushPeerUpdates(); +// } +// } + + public KeyPair getSelfKeypair() { + return _selfKeyPair; + } + + public X509Certificate getSelfCertificate() { + return _selfCertificate; + } + +// // Returns true if host's initial sync wasn't done before, and marks it as done +// public boolean markInitialOpSyncDone(UUID connectedHost) { +// return jObjectTxManager.executeTx(() -> { +// peerDirectoryLocal.get().rwLock(); +// try { +// peerDirectoryLocal.get().local(); +// boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost); +// +// if (!contained) +// peerDirectoryLocal.get().local().mutate(new JMutator() { +// @Override +// public boolean mutate(PeerDirectoryLocal object) { +// object.getInitialOpSyncDone().add(connectedHost); +// return true; +// } +// +// @Override +// public void revert(PeerDirectoryLocal object) { +// object.getInitialOpSyncDone().remove(connectedHost); +// } +// }); +// return !contained; +// } finally { +// peerDirectoryLocal.get().rwUnlock(); +// } +// }); +// } +// +// public boolean markInitialObjSyncDone(UUID connectedHost) { +// return jObjectTxManager.executeTx(() -> { +// peerDirectoryLocal.get().rwLock(); +// try { +// peerDirectoryLocal.get().local(); +// boolean contained = peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost); +// +// if (!contained) +// peerDirectoryLocal.get().local().mutate(new JMutator() { +// @Override +// public boolean mutate(PeerDirectoryLocal object) { +// object.getInitialObjSyncDone().add(connectedHost); +// return true; +// } +// +// @Override +// public void revert(PeerDirectoryLocal object) { +// object.getInitialObjSyncDone().remove(connectedHost); +// } +// }); +// return !contained; +// } finally { +// peerDirectoryLocal.get().rwUnlock(); +// } +// }); +// } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java new file mode 100644 index 00000000..23a99e0e --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; + +import java.io.Serializable; +import java.security.KeyPair; +import java.security.cert.X509Certificate; + +public record PersistentRemoteHostsData(PeerId selfUuid, + long selfCounter, + X509Certificate selfCertificate, + KeyPair selfKeyPair) implements JData, Serializable { + public static final JObjectKey KEY = JObjectKey.of("self_peer_data"); + + @Override + public JObjectKey key() { + return KEY; + } + + public PersistentRemoteHostsData withSelfCounter(long selfCounter) { + return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java new file mode 100644 index 00000000..37458390 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -0,0 +1,174 
@@ +//package com.usatiuk.dhfs.objects.repository; +// +//import com.google.common.collect.Maps; +//import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +//import com.usatiuk.dhfs.objects.jrepository.*; +//import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +//import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +//import com.usatiuk.dhfs.objects.repository.opsupport.Op; +//import io.grpc.Status; +//import io.grpc.StatusRuntimeException; +//import io.quarkus.logging.Log; +//import jakarta.enterprise.context.ApplicationScoped; +//import jakarta.inject.Inject; +//import org.apache.commons.lang3.tuple.Pair; +// +//import javax.annotation.Nullable; +//import java.util.*; +//import java.util.concurrent.Callable; +//import java.util.concurrent.ConcurrentLinkedDeque; +//import java.util.concurrent.Executors; +//import java.util.stream.Collectors; +// +//@ApplicationScoped +//public class RemoteObjectServiceClient { +// @Inject +// PersistentPeerDataService persistentPeerDataService; +// +// @Inject +// RpcClientFactory rpcClientFactory; +// +// @Inject +// JObjectManager jObjectManager; +// +// @Inject +// SyncHandler syncHandler; +// @Inject +// InvalidationQueueService invalidationQueueService; +// @Inject +// ProtoSerializer dataProtoSerializer; +// @Inject +// ProtoSerializer opProtoSerializer; +// @Inject +// JObjectTxManager jObjectTxManager; +// +// public Pair getSpecificObject(UUID host, String name) { +// return rpcClientFactory.withObjSyncClient(host, client -> { +// var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); +// return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); +// }); +// } +// +// public JObjectDataP getObject(JObject jObject) { +// jObject.assertRwLock(); +// +// var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> { +// var ourVersion = md.getOurVersion(); +// if (ourVersion >= 1) +// return md.getRemoteCopies().entrySet().stream() +// .filter(entry -> entry.getValue().equals(ourVersion)) +// .map(Map.Entry::getKey).toList(); +// else +// return persistentPeerDataService.getHostUuids(); +// }); +// +// if (targets.isEmpty()) +// throw new IllegalStateException("No targets for object " + jObject.getMeta().getName()); +// +// Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); +// +// return rpcClientFactory.withObjSyncClient(targets, client -> { +// var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build()); +// +// var receivedMap = new HashMap(); +// for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) { +// receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); +// } +// +// return jObjectTxManager.executeTx(() -> { +// return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { +// var unexpected = !Objects.equals( +// Maps.filterValues(md.getChangelog(), val -> val != 0), +// Maps.filterValues(receivedMap, val -> val != 0)); +// +// if (unexpected) { +// try { +// syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader()); +// } catch (SyncHandler.OutdatedUpdateException ignored) { +// Log.info("Outdated update of " + md.getName() + " from " + 
reply.getSelfUuid()); +// invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True? +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version")); +// } catch (Exception e) { +// Log.error("Received unexpected object version from " + reply.getSelfUuid() +// + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e); +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version")); +// } +// } +// +// return reply.getObject().getContent(); +// }); +// }); +// }); +// } +// +// @Nullable +// public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { +// var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); +// +// var header = obj +// .runReadLocked( +// obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class) +// ? JObjectManager.ResolutionStrategy.LOCAL_ONLY +// : JObjectManager.ResolutionStrategy.NO_RESOLUTION, +// (m, d) -> { +// if (obj.getMeta().isDeleted()) return null; +// if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null) +// Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found"); +// if (m.getKnownClass().isAnnotationPresent(PushResolution.class)) +// return m.toRpcHeader(dataProtoSerializer.serialize(d)); +// else +// return m.toRpcHeader(); +// }); +// if (header == null) return null; +// jObjectTxManager.executeTx(obj::markSeen); +// builder.setHeader(header); +// +// var send = builder.build(); +// +// return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send)); +// } +// +// public OpPushReply pushOps(List ops, String queueName, UUID host) { +// for (Op op : ops) { +// for (var ref : op.getEscapedRefs()) { +// jObjectTxManager.executeTx(() -> { +// jObjectManager.get(ref).ifPresent(JObject::markSeen); +// }); +// } +// } +// var builder = OpPushMsg.newBuilder() +// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) +// .setQueueId(queueName); +// for (var op : ops) +// builder.addMsg(opProtoSerializer.serialize(op)); +// return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build())); +// } +// +// public Collection canDelete(Collection targets, String object, Collection ourReferrers) { +// ConcurrentLinkedDeque results = new ConcurrentLinkedDeque<>(); +// Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); +// try (var executor = Executors.newVirtualThreadPerTaskExecutor()) { +// try { +// executor.invokeAll(targets.stream().>map(h -> () -> { +// try { +// var req = CanDeleteRequest.newBuilder() +// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) +// .setName(object); +// req.addAllOurReferrers(ourReferrers); +// var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build())); +// if (res != null) +// results.add(res); +// } catch (Exception e) { +// Log.debug("Error when asking canDelete for object " + object, e); +// } +// return null; +// }).toList()); +// } catch (InterruptedException e) { +// Log.warn("Interrupted waiting for canDelete for object " + object); +// } +// if (!executor.shutdownNow().isEmpty()) +// Log.warn("Didn't ask all targets when asking canDelete for " + object); +// } +// return results; +// } +//} diff --git 
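[Editor's note, before the server side: the commented-out canDelete client above fans one request out per target on a virtual-thread executor and gathers whatever answers arrive into a concurrent deque, logging rather than propagating per-target failures. The same gather shape in isolation, with a string in place of the RPC reply:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Executors;

public class CanDeleteGatherDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedDeque<String> results = new ConcurrentLinkedDeque<>();
        List<String> targets = List.of("peer-a", "peer-b", "peer-c");
        // One virtual thread per target; failures are logged, not thrown,
        // so a partial set of answers still reaches the caller.
        try (var executor = Executors.newVirtualThreadPerTaskExecutor()) {
            executor.invokeAll(targets.stream().<Callable<Void>>map(t -> () -> {
                results.add(t + ": deletion candidate"); // stand-in for the RPC reply
                return null;
            }).toList());
        }
        results.forEach(System.out::println);
    }
}]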
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java new file mode 100644 index 00000000..990ad534 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -0,0 +1,191 @@ +package com.usatiuk.dhfs.objects.repository; + +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.grpc.GrpcService; +import io.smallrye.common.annotation.Blocking; +import io.smallrye.mutiny.Uni; +import jakarta.annotation.security.RolesAllowed; +import jakarta.inject.Inject; + +/// / Note: RunOnVirtualThread hangs somehow +@GrpcService +@RolesAllowed("cluster-member") +public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { +// @Inject +// SyncHandler syncHandler; +// +// @Inject +// JObjectManager jObjectManager; +// +// @Inject +// PeerManager peerManager; +// +// @Inject +// AutoSyncProcessor autoSyncProcessor; +// +@Inject +PersistentPeerDataService persistentPeerDataService; +// +// @Inject +// InvalidationQueueService invalidationQueueService; +// +// @Inject +// ProtoSerializer dataProtoSerializer; +// @Inject +// ProtoSerializer opProtoSerializer; +// +// @Inject +// OpObjectRegistry opObjectRegistry; +// +// @Inject +// JObjectTxManager jObjectTxManager; +// +// @Override +// @Blocking +// public Uni getObject(GetObjectRequest request) { +// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); +// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) +// throw new StatusRuntimeException(Status.UNAUTHENTICATED); +// +// Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid()); +// +// var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); +// +// // Does @Blocking break this? +// return Uni.createFrom().emitter(emitter -> { +// var replyObj = jObjectTxManager.executeTx(() -> { +// // Obj.markSeen before markSeen of its children +// obj.markSeen(); +// return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { +// if (meta.isOnlyLocal()) +// throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object")); +// if (data == null) { +// Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid()); +// throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally")); +// } +// data.extractRefs().forEach(ref -> +// jObjectManager.get(ref) +// .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?")) +// .markSeen()); +// +// return ApiObject.newBuilder() +// .setHeader(obj.getMeta().toRpcHeader()) +// .setContent(dataProtoSerializer.serialize(obj.getData())).build(); +// }); +// }); +// var ret = GetObjectReply.newBuilder() +// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) +// .setObject(replyObj).build(); +// // TODO: Could this cause problems if we wait for too long? 
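[Editor's note: ping() further below is the only live RPC in this mostly stubbed-out server, and it follows a guard-then-answer shape: reject a blank sender identity up front, then reply with this node's own UUID. The shape sketched with Mutiny; PingHandler and the String payloads are stand-ins for the protobuf request/reply types:

import io.smallrye.mutiny.Uni;

class PingHandler {
    private final String selfUuid;

    PingHandler(String selfUuid) {
        this.selfUuid = selfUuid;
    }

    Uni<String> ping(String senderUuid) {
        // Same guard as ping() below: a blank sender is rejected up front...
        if (senderUuid.isBlank())
            return Uni.createFrom().failure(new IllegalArgumentException("blank sender uuid"));
        // ...otherwise the node answers with its own identity.
        return Uni.createFrom().item(selfUuid);
    }
}]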
+// obj.commitFenceAsync(() -> emitter.complete(ret)); +// }); +// } +// +// @Override +// @Blocking +// public Uni canDelete(CanDeleteRequest request) { +// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); +// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) +// throw new StatusRuntimeException(Status.UNAUTHENTICATED); +// +// Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid()); +// +// var builder = CanDeleteReply.newBuilder(); +// +// var obj = jObjectManager.get(request.getName()); +// +// builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); +// builder.setObjName(request.getName()); +// +// if (obj.isPresent()) try { +// boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> { +// if (m.isDeleted() && !m.isDeletionCandidate()) +// throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate"); +// builder.setDeletionCandidate(m.isDeletionCandidate()); +// builder.addAllReferrers(m.getReferrers()); +// return m.isDeletionCandidate() && !m.isDeleted(); +// }); +// // FIXME +//// if (tryUpdate) { +//// obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { +//// return null; +//// }); +//// } +// } catch (DeletedObjectAccessException dox) { +// builder.setDeletionCandidate(true); +// } +// else { +// builder.setDeletionCandidate(true); +// } +// +// var ret = builder.build(); +// +// if (!ret.getDeletionCandidate()) +// for (var rr : request.getOurReferrersList()) +// autoSyncProcessor.add(rr); +// +// return Uni.createFrom().item(ret); +// } +// +// @Override +// @Blocking +// public Uni indexUpdate(IndexUpdatePush request) { +// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); +// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) +// throw new StatusRuntimeException(Status.UNAUTHENTICATED); +// + /// / Log.info("<-- indexUpdate: " + request.getHeader().getName()); +// return jObjectTxManager.executeTxAndFlush(() -> { +// return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); +// }); +// } +// +// @Override +// @Blocking +// public Uni opPush(OpPushMsg request) { +// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); +// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) +// throw new StatusRuntimeException(Status.UNAUTHENTICATED); +// +// try { +// var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); +// jObjectTxManager.executeTxAndFlush(() -> { +// opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs); +// }); +// } catch (Exception e) { +// Log.error(e, e); +// throw e; +// } +// return Uni.createFrom().item(OpPushReply.getDefaultInstance()); +// } +// + + @Override + public Uni getObject(GetObjectRequest request) { + return null; + } + + @Override + public Uni canDelete(CanDeleteRequest request) { + return null; + } + + @Override + public Uni indexUpdate(IndexUpdatePush request) { + return null; + } + + @Override + public Uni opPush(OpPushMsg request) { + return null; + } + + @Override + @Blocking + public Uni ping(PingRequest request) { + if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); + + return 
Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java new file mode 100644 index 00000000..a985be13 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java @@ -0,0 +1,44 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; +import io.grpc.ChannelCredentials; +import io.grpc.ManagedChannel; +import io.grpc.TlsChannelCredentials; +import io.grpc.netty.NettyChannelBuilder; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.net.ssl.KeyManagerFactory; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.util.concurrent.TimeUnit; + +@ApplicationScoped +public class RpcChannelFactory { + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + PeerTrustManager peerTrustManager; + + private ChannelCredentials getChannelCredentials() { + try { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null, null); + + ks.setKeyEntry("clientkey", persistentPeerDataService.getSelfKeypair().getPrivate(), null, new Certificate[]{persistentPeerDataService.getSelfCertificate()}); + + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(ks, null); + + ChannelCredentials creds = TlsChannelCredentials.newBuilder().trustManager(peerTrustManager).keyManager(keyManagerFactory.getKeyManagers()).build(); + return creds; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + ManagedChannel getSecureChannel(PeerId host, String address, int port) { + return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host.toString()).idleTimeout(10, TimeUnit.SECONDS).build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java new file mode 100644 index 00000000..3dddddf3 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java @@ -0,0 +1,94 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +// TODO: Dedup this +@ApplicationScoped +public class RpcClientFactory { + @ConfigProperty(name = "dhfs.objects.sync.timeout") + long syncTimeout; + + @Inject + PeerManager peerManager; + + @Inject + RpcChannelFactory rpcChannelFactory; + + // FIXME: Leaks! 
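+    // (Assumption: the leak is that cached stubs are never evicted when a peer's
+    // address changes or it disappears for good; dropCache() below just swaps out
+    // the whole map as a blunt workaround.)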
+ private ConcurrentMap _objSyncCache = new ConcurrentHashMap<>(); + + public R withObjSyncClient(Collection targets, ObjectSyncClientFunction fn) { + var shuffledList = new ArrayList<>(targets); + Collections.shuffle(shuffledList); + for (PeerId target : shuffledList) { + try { + return withObjSyncClient(target, fn); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode().equals(Status.UNAVAILABLE.getCode())) + Log.debug("Host " + target + " is unreachable: " + e.getMessage()); + else + Log.warn("When calling " + target + " " + e.getMessage()); + } catch (Exception e) { + Log.warn("When calling " + target + " " + e.getMessage()); + } + } + throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("No reachable targets!")); + } + + public R withObjSyncClient(PeerId target, ObjectSyncClientFunction fn) { + var hostinfo = peerManager.getAddress(target); + + if (hostinfo == null) + throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target)); + + return withObjSyncClient(target, hostinfo, syncTimeout, fn); + } + + public R withObjSyncClient(PeerId host, PeerAddress address, long timeout, ObjectSyncClientFunction fn) { + return switch (address) { + case IpPeerAddress ipPeerAddress -> + withObjSyncClient(host, ipPeerAddress.address(), ipPeerAddress.securePort(), timeout, fn); + default -> throw new IllegalStateException("Unexpected value: " + address); + }; + } + + public R withObjSyncClient(PeerId host, InetAddress addr, int port, long timeout, ObjectSyncClientFunction fn) { + var key = new ObjSyncStubKey(host, addr, port); + var stub = _objSyncCache.computeIfAbsent(key, (k) -> { + var channel = rpcChannelFactory.getSecureChannel(host, addr.getHostAddress(), port); + return DhfsObjectSyncGrpcGrpc.newBlockingStub(channel) + .withMaxOutboundMessageSize(Integer.MAX_VALUE) + .withMaxInboundMessageSize(Integer.MAX_VALUE); + }); + return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); + } + + public void dropCache() { + _objSyncCache = new ConcurrentHashMap<>(); + } + + @FunctionalInterface + public interface ObjectSyncClientFunction { + R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); + } + + private record ObjSyncStubKey(PeerId id, InetAddress addr, int port) { + } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java new file mode 100644 index 00000000..f47e34e2 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -0,0 +1,207 @@ +//package com.usatiuk.dhfs.objects.repository; +// +//import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +//import com.usatiuk.dhfs.objects.jrepository.JObject; +//import com.usatiuk.dhfs.objects.jrepository.JObjectData; +//import com.usatiuk.dhfs.objects.jrepository.JObjectManager; +//import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; +//import com.usatiuk.dhfs.objects.persistence.JObjectDataP; +//import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +//import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; +//import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; +//import io.grpc.Status; +//import io.quarkus.logging.Log; +//import jakarta.enterprise.context.ApplicationScoped; +//import jakarta.enterprise.inject.Instance; +//import jakarta.inject.Inject; +// +//import java.util.HashMap; +//import 
java.util.Objects; +//import java.util.Optional; +//import java.util.UUID; +//import java.util.concurrent.atomic.AtomicReference; +//import java.util.stream.Collectors; +//import java.util.stream.Stream; +// +//@ApplicationScoped +//public class SyncHandler { +// @Inject +// JObjectManager jObjectManager; +// @Inject +// PeerManager peerManager; +// @Inject +// RemoteObjectServiceClient remoteObjectServiceClient; +// @Inject +// InvalidationQueueService invalidationQueueService; +// @Inject +// Instance conflictResolvers; +// @Inject +// PersistentPeerDataService persistentPeerDataService; +// @Inject +// ProtoSerializer dataProtoSerializer; +// @Inject +// OpObjectRegistry opObjectRegistry; +// @Inject +// JObjectTxManager jObjectTxManager; +// +// public void pushInitialResyncObj(UUID host) { +// Log.info("Doing initial object push for " + host); +// +// var objs = jObjectManager.findAll(); +// +// for (var obj : objs) { +// Log.trace("IS: " + obj + " to " + host); +// invalidationQueueService.pushInvalidationToOne(host, obj); +// } +// } +// +// public void pushInitialResyncOp(UUID host) { +// Log.info("Doing initial op push for " + host); +// +// jObjectTxManager.executeTxAndFlush( +// () -> { +// opObjectRegistry.pushBootstrapData(host); +// } +// ); +// } +// +// public void handleOneUpdate(UUID from, ObjectHeader header) { +// AtomicReference> foundExt = new AtomicReference<>(); +// +// boolean conflict = jObjectTxManager.executeTx(() -> { +// JObject found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty()); +// foundExt.set(found); +// +// var receivedTotalVer = header.getChangelog().getEntriesList() +// .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum); +// +// var receivedMap = new HashMap(); +// for (var e : header.getChangelog().getEntriesList()) { +// receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); +// } +// +// return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> { +// if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) { +// Log.error("Received older index update than was known for host: " +// + from + " " + header.getName()); +// throw new OutdatedUpdateException(); +// } +// +// String rcv = ""; +// for (var e : header.getChangelog().getEntriesList()) { +// rcv += e.getHost() + ": " + e.getVersion() + "; "; +// } +// String ours = ""; +// for (var e : md.getChangelog().entrySet()) { +// ours += e.getKey() + ": " + e.getValue() + "; "; +// } +// Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv); +// +// boolean updatedRemoteVersion = false; +// +// var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer); +// if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true; +// +// boolean hasLower = false; +// boolean hasHigher = false; +// for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) { +// if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L)) +// hasLower = true; +// if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L)) +// hasHigher = true; +// } +// +// if (hasLower && hasHigher) { +// Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from); +// return true; +// } +// +// if (hasLower) { +// Log.info("Received older index update than known: " +// + from + " " + 
header.getName()); +// throw new OutdatedUpdateException(); +// } +// +// if (hasHigher) { +// invalidate.apply(); +// md.getChangelog().clear(); +// md.getChangelog().putAll(receivedMap); +// md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L); +// if (header.hasPushedData()) +// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); +// return false; +// } else if (data == null && header.hasPushedData()) { +// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// if (found.getData() == null) +// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); +// } +// +// assert Objects.equals(receivedTotalVer, md.getOurVersion()); +// +// if (!updatedRemoteVersion) +// Log.debug("No action on update: " + header.getName() + " from " + from); +// +// return false; +// }); +// }); +// +// // TODO: Is the lock gap here ok? +// if (conflict) { +// Log.info("Trying conflict resolution: " + header.getName() + " from " + from); +// var found = foundExt.get(); +// +// JObjectData theirsData; +// ObjectHeader theirsHeader; +// if (header.hasPushedData()) { +// theirsHeader = header; +// theirsData = dataProtoSerializer.deserialize(header.getPushedData()); +// } else { +// var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); +// theirsData = dataProtoSerializer.deserialize(got.getRight()); +// theirsHeader = got.getLeft(); +// } +// +// jObjectTxManager.executeTx(() -> { +// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// if (d == null) +// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); +// return d.getConflictResolver(); +// }); +// var resolver = conflictResolvers.select(resolverClass); +// resolver.get().resolve(from, theirsHeader, theirsData, found); +// }); +// Log.info("Resolved conflict for " + from + " " + header.getName()); +// } +// +// } +// +// public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) { +// // TODO: Dedup +// try { +// handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader()); +// } catch (OutdatedUpdateException ignored) { +// Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid()); +// invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName()); +// } catch (Exception ex) { +// Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex); +// throw ex; +// } +// +// return IndexUpdateReply.getDefaultInstance(); +// } +// +// protected static class OutdatedUpdateException extends RuntimeException { +// OutdatedUpdateException() { +// super(); +// } +// +// OutdatedUpdateException(String message) { +// super(message); +// } +// +// @Override +// public synchronized Throwable fillInStackTrace() { +// return this; +// } +// } +//} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java new file mode 100644 index 00000000..0d6ab1da --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/IpPeerAddress.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery; + +import 
com.usatiuk.dhfs.objects.PeerId; + +import java.net.InetAddress; + +public record IpPeerAddress(PeerId peer, PeerAddressType type, + InetAddress address, int port, int securePort) implements PeerAddress { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java new file mode 100644 index 00000000..81824de5 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddress.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery; + +import com.usatiuk.dhfs.objects.PeerId; + +public interface PeerAddress { + PeerId peer(); + PeerAddressType type(); +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java new file mode 100644 index 00000000..b5027e4b --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerAddressType.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery; + +public enum PeerAddressType { + LAN, + WAN, + PROXY +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java new file mode 100644 index 00000000..50523880 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java @@ -0,0 +1,70 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery; + +import com.usatiuk.dhfs.objects.PeerId; +import jakarta.enterprise.context.ApplicationScoped; +import org.apache.commons.collections4.MultiValuedMap; +import org.apache.commons.collections4.multimap.HashSetValuedHashMap; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +@ApplicationScoped +public class PeerDiscoveryDirectory { + @ConfigProperty(name = "dhfs.peerdiscovery.timeout") + long timeout; + + private record PeerEntry(PeerAddress addr, long lastSeen) { + public PeerEntry withLastSeen(long lastSeen) { + return new PeerEntry(addr, lastSeen); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) return false; + PeerEntry peerEntry = (PeerEntry) o; + return Objects.equals(addr, peerEntry.addr); + } + + @Override + public int hashCode() { + return Objects.hashCode(addr); + } + } + + private final MultiValuedMap _entries = new HashSetValuedHashMap<>(); + + public void notifyAddr(PeerAddress addr) { + synchronized (_entries) { + var peer = addr.peer(); + _entries.removeMapping(peer, new PeerEntry(addr, 0)); + _entries.put(peer, new PeerEntry(addr, System.currentTimeMillis())); + } + } + + public Collection getForPeer(PeerId peer) { + synchronized (_entries) { + long curTime = System.currentTimeMillis(); + var partitioned = _entries.asMap().get(peer).stream() + .collect(Collectors.partitioningBy(e -> e.lastSeen() + timeout < curTime)); + for (var entry : partitioned.get(true)) { + _entries.removeMapping(peer, entry); + } + return partitioned.get(false).stream().map(PeerEntry::addr).toList(); + } + } + + public Collection 
getReachablePeers() { + synchronized (_entries) { + long curTime = System.currentTimeMillis(); + var partitioned = _entries.entries().stream() + .collect(Collectors.partitioningBy(e -> e.getValue().lastSeen() + timeout < curTime)); + for (var entry : partitioned.get(true)) { + _entries.removeMapping(entry.getKey(), entry.getValue()); + } + return partitioned.get(false).stream().map(Map.Entry::getKey).collect(Collectors.toUnmodifiableSet()); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java new file mode 100644 index 00000000..f201c132 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/StaticPeerDiscovery.java @@ -0,0 +1,46 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery; + +import com.usatiuk.dhfs.objects.PeerId; +import io.quarkus.scheduler.Scheduled; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Stream; + +@ApplicationScoped +public class StaticPeerDiscovery { + private final List _peers; + + public StaticPeerDiscovery(@ConfigProperty(name = "dhfs.peerdiscovery.static-peers") Optional staticPeers) { + var peers = staticPeers.orElse(""); + _peers = Arrays.stream(peers.split(",")).flatMap(e -> + { + if (e.isEmpty()) { + return Stream.of(); + } + var split = e.split(":"); + try { + return Stream.of(new IpPeerAddress(PeerId.of(split[0]), PeerAddressType.LAN, InetAddress.getByName(split[1]), + Integer.parseInt(split[2]), Integer.parseInt(split[3]))); + } catch (UnknownHostException ex) { + throw new RuntimeException(ex); + } + }).toList(); + } + + @Inject + PeerDiscoveryDirectory peerDiscoveryDirectory; + + @Scheduled(every = "1s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + public void discoverPeers() { + for (var peer : _peers) { + peerDiscoveryDirectory.notifyAddr(peer); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java new file mode 100644 index 00000000..d3f77471 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java @@ -0,0 +1,102 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery.local; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryInfo; +import io.quarkus.arc.properties.IfBuildProperty; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.Startup; +import io.quarkus.scheduler.Scheduled; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.net.*; + +@ApplicationScoped +@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true") +public class LocalPeerDiscoveryBroadcaster { + @Inject + PersistentPeerDataService 
persistentPeerDataService; + + @ConfigProperty(name = "quarkus.http.port") + int ourPort; + + @ConfigProperty(name = "quarkus.http.ssl-port") + int ourSecurePort; + + @ConfigProperty(name = "dhfs.objects.peerdiscovery.port") + int broadcastPort; + + @ConfigProperty(name = "dhfs.objects.peerdiscovery.broadcast") + boolean enabled; + + private DatagramSocket _socket; + + @Startup + void init() throws SocketException { + if (!enabled) { + return; + } + _socket = new DatagramSocket(); + _socket.setBroadcast(true); + } + + void shutdown(@Observes @Priority(10) ShutdownEvent event) { + if (!enabled) { + return; + } + _socket.close(); + } + + @Scheduled(every = "${dhfs.objects.peerdiscovery.interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + public void broadcast() throws Exception { + if (!enabled) { + return; + } + var sendData = PeerDiscoveryInfo.newBuilder() + .setUuid(persistentPeerDataService.getSelfUuid().toString()) + .setPort(ourPort) + .setSecurePort(ourSecurePort) + .build(); + + var sendBytes = sendData.toByteArray(); + + DatagramPacket sendPacket + = new DatagramPacket(sendBytes, sendBytes.length, + InetAddress.getByName("255.255.255.255"), broadcastPort); + + _socket.send(sendPacket); + + var interfaces = NetworkInterface.getNetworkInterfaces(); + while (interfaces.hasMoreElements()) { + NetworkInterface networkInterface = interfaces.nextElement(); + + try { + if (networkInterface.isLoopback() || !networkInterface.isUp()) { + continue; + } + } catch (Exception e) { + continue; + } + + for (InterfaceAddress interfaceAddress : networkInterface.getInterfaceAddresses()) { + InetAddress broadcast = interfaceAddress.getBroadcast(); + if (broadcast == null) { + continue; + } + + try { + sendPacket = new DatagramPacket(sendBytes, sendBytes.length, broadcast, broadcastPort); + _socket.send(sendPacket); + } catch (Exception ignored) { + continue; + } + +// Log.trace(getClass().getName() + "Broadcast sent to: " + broadcast.getHostAddress() +// + ", at: " + networkInterface.getDisplayName()); + } + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java new file mode 100644 index 00000000..f5ce9d6b --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java @@ -0,0 +1,91 @@ +package com.usatiuk.dhfs.objects.repository.peerdiscovery.local; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddressType; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryDirectory; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryInfo; +import io.quarkus.arc.properties.IfBuildProperty; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.Startup; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.net.*; +import java.nio.ByteBuffer; +import java.util.UUID; + +@ApplicationScoped +@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true") +public 
class LocalPeerDiscoveryClient { + @Inject + PeerDiscoveryDirectory peerDiscoveryDirectory; + + private Thread _clientThread; + + private DatagramSocket _socket; + + @ConfigProperty(name = "dhfs.objects.peerdiscovery.broadcast") + boolean enabled; + + @Startup + void init() throws SocketException, UnknownHostException { + if (!enabled) { + return; + } + _socket = new DatagramSocket(42069, InetAddress.getByName("0.0.0.0")); + _socket.setBroadcast(true); + + _clientThread = new Thread(this::client); + _clientThread.setName("LocalPeerDiscoveryClient"); + _clientThread.start(); + } + + void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { + if (!enabled) { + return; + } + _socket.close(); + _clientThread.interrupt(); + _clientThread.interrupt(); + while (_clientThread.isAlive()) { + try { + _clientThread.join(); + } catch (InterruptedException ignored) { + } + } + } + + private void client() { + while (!Thread.interrupted() && !_socket.isClosed()) { + try { + byte[] buf = new byte[10000]; + DatagramPacket packet = new DatagramPacket(buf, buf.length); + _socket.receive(packet); + + try { + var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength())); + peerDiscoveryDirectory.notifyAddr( + new IpPeerAddress( + PeerId.of(UUID.fromString(got.getUuid())), + PeerAddressType.LAN, + packet.getAddress(), + got.getPort(), + got.getSecurePort() + ) + ); + } catch (InvalidProtocolBufferException e) { + continue; + } + } catch (Exception ex) { + Log.error(ex); + } + } + Log.info("PeerDiscoveryClient stopped"); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java new file mode 100644 index 00000000..caa45d3a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -0,0 +1,37 @@ +package com.usatiuk.dhfs.objects.repository.peersync; + +import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.CertificateTools; +import org.pcollections.HashTreePSet; +import org.pcollections.PCollection; + +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; + +public record PeerInfo(JObjectKey key, PCollection refsFrom, boolean frozen, PeerId id, + byte[] cert) implements JDataRefcounted, JDataRemote { + public PeerInfo(PeerId id, byte[] cert) { + this(id.toJObjectKey(), HashTreePSet.empty(), false, id, cert); + } + + @Override + public JDataRefcounted withRefsFrom(PCollection refs) { + return new PeerInfo(key, refs, frozen, id, cert); + } + + @Override + public JDataRefcounted withFrozen(boolean frozen) { + return new PeerInfo(key, refsFrom, frozen, id, cert); + } + + public X509Certificate parsedCert() { + try { + return CertificateTools.certFromBytes(cert); + } catch (CertificateException e) { + throw new RuntimeException(e); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java new file mode 100644 index 00000000..c83fe311 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java @@ -0,0 +1,76 @@ +package 
com.usatiuk.dhfs.objects.repository.peersync; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.List; +import java.util.Optional; + +@ApplicationScoped +public class PeerInfoService { + @Inject + Transaction curTx; + @Inject + TransactionManager jObjectTxManager; + @Inject + JKleppmannTreeManager jKleppmannTreeManager; + @Inject + PersistentPeerDataService persistentPeerDataService; + + private JKleppmannTreeManager.JKleppmannTree getTree() { + return jKleppmannTreeManager.getTree(JObjectKey.of("peers")); + } + + public Optional getPeerInfo(PeerId peer) { + return jObjectTxManager.run(() -> { + var gotKey = getTree().traverse(List.of(peer.toString())); + if (gotKey == null) { + return Optional.empty(); + } + return curTx.get(JKleppmannTreeNode.class, gotKey).flatMap(node -> { + var meta = (JKleppmannTreeNodeMetaPeer) node.meta(); + return curTx.get(PeerInfo.class, meta.getPeerId()); + }); + }); + } + + public List getPeers() { + return jObjectTxManager.run(() -> { + var gotKey = getTree().traverse(List.of()); + return curTx.get(JKleppmannTreeNode.class, gotKey).map( + node -> node.children().keySet().stream() + .map(PeerId::of).map(this::getPeerInfo) + .map(Optional::get).toList()) + .orElseThrow(); + }); + } + + public List getPeersNoSelf() { + return jObjectTxManager.run(() -> { + var gotKey = getTree().traverse(List.of()); + return curTx.get(JKleppmannTreeNode.class, gotKey).map( + node -> node.children().keySet().stream() + .map(PeerId::of).map(this::getPeerInfo) + .map(Optional::get).filter( + peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList()) + .orElseThrow(); + }); + } + + public void putPeer(PeerId id, byte[] cert) { + jObjectTxManager.run(() -> { + var parent = getTree().traverse(List.of()); + var newPeerInfo = new PeerInfo(id, cert); + curTx.put(newPeerInfo); + getTree().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), getTree().getNewNodeId()); + }); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java new file mode 100644 index 00000000..e84f4d83 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/ApiPeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.peersync.api; + +public record ApiPeerInfo(String selfUuid, String cert) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java new file mode 100644 index 00000000..f3d07189 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApi.java @@ -0,0 +1,26 @@ +package com.usatiuk.dhfs.objects.repository.peersync.api; + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; 
+import jakarta.inject.Inject; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; + +import java.security.cert.CertificateEncodingException; +import java.util.Base64; + +@Path("/peer-info") +public class PeerSyncApi { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Path("self") + @GET + public ApiPeerInfo getSelfInfo() { + try { + return new ApiPeerInfo(persistentPeerDataService.getSelfUuid().toString(), + Base64.getEncoder().encodeToString(persistentPeerDataService.getSelfCertificate().getEncoded())); + } catch (CertificateEncodingException e) { + throw new RuntimeException(e); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java new file mode 100644 index 00000000..49a04ac6 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClient.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects.repository.peersync.api; + +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; + +@Path("/peer-info") +public interface PeerSyncApiClient { + @Path("self") + @GET + ApiPeerInfo getSelfInfo(); +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java new file mode 100644 index 00000000..c09262e9 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/api/PeerSyncApiClientDynamic.java @@ -0,0 +1,28 @@ +package com.usatiuk.dhfs.objects.repository.peersync.api; + +import com.usatiuk.dhfs.objects.repository.peerdiscovery.IpPeerAddress; +import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerAddress; +import io.quarkus.rest.client.reactive.QuarkusRestClientBuilder; +import jakarta.enterprise.context.ApplicationScoped; + +import java.net.URI; +import java.util.concurrent.TimeUnit; + +@ApplicationScoped +public class PeerSyncApiClientDynamic { + public ApiPeerInfo getSelfInfo(PeerAddress addr) { + return switch (addr) { + case IpPeerAddress ipAddr -> getSelfInfo(ipAddr.address().getHostAddress(), ipAddr.port()); + default -> throw new IllegalArgumentException("Unsupported peer address type: " + addr.getClass()); + }; + } + + private ApiPeerInfo getSelfInfo(String address, int port) { + var client = QuarkusRestClientBuilder.newBuilder() + .baseUri(URI.create("http://" + address + ":" + port)) + .connectTimeout(5, TimeUnit.SECONDS) + .readTimeout(5, TimeUnit.SECONDS) + .build(PeerSyncApiClient.class); + return client.getSelfInfo(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java new file mode 100644 index 00000000..a9ea1800 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/structs/JKleppmannTreeNodeMetaPeer.java @@ -0,0 +1,41 @@ +package com.usatiuk.dhfs.objects.repository.peersync.structs; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; + +import java.util.Objects; + +//@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) +public class 
JKleppmannTreeNodeMetaPeer extends JKleppmannTreeNodeMeta { + private final JObjectKey _peerId; + + public JKleppmannTreeNodeMetaPeer(PeerId id) { + super(id.toString()); + _peerId = id.toJObjectKey(); + } + + public JObjectKey getPeerId() { + return _peerId; + } + + @Override + public JKleppmannTreeNodeMeta withName(String name) { + assert false; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + JKleppmannTreeNodeMetaPeer that = (JKleppmannTreeNodeMetaPeer) o; + return Objects.equals(_peerId, that._peerId); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), _peerId); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java new file mode 100644 index 00000000..d2e1c4bf --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java @@ -0,0 +1,51 @@ +package com.usatiuk.dhfs.objects.repository.peertrust; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import io.quarkus.logging.Log; +import io.quarkus.security.credential.CertificateCredential; +import io.quarkus.security.identity.AuthenticationRequestContext; +import io.quarkus.security.identity.SecurityIdentity; +import io.quarkus.security.identity.SecurityIdentityAugmentor; +import io.quarkus.security.runtime.QuarkusSecurityIdentity; +import io.smallrye.mutiny.Uni; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.function.Supplier; + +@ApplicationScoped +public class PeerRolesAugmentor implements SecurityIdentityAugmentor { + @Inject + PeerInfoService peerInfoService; + + @Override + public Uni augment(SecurityIdentity identity, AuthenticationRequestContext context) { + return Uni.createFrom().item(build(identity)); + } + + private Supplier build(SecurityIdentity identity) { + if (identity.isAnonymous()) { + return () -> identity; + } else { + QuarkusSecurityIdentity.Builder builder = QuarkusSecurityIdentity.builder(identity); + + var uuid = identity.getPrincipal().getName().substring(3); + + try { + var entry = peerInfoService.getPeerInfo(PeerId.of(uuid)); + + if (!entry.get().parsedCert().equals(identity.getCredential(CertificateCredential.class).getCertificate())) { + Log.error("Certificate mismatch for " + uuid); + return () -> identity; + } + + builder.addRole("cluster-member"); + return builder::build; + } catch (Exception e) { + Log.error("Error when checking certificate for " + uuid, e); + return () -> identity; + } + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java new file mode 100644 index 00000000..26573abb --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java @@ -0,0 +1,69 @@ +package com.usatiuk.dhfs.objects.repository.peertrust; + +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import org.apache.commons.lang3.tuple.Pair; + +import javax.net.ssl.TrustManager; 
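+// (Assumption: keeping the delegate X509TrustManager behind an AtomicReference,
+// as in the class below, lets reloadTrustManagerHosts() swap in newly learned
+// peer certificates atomically without restarting the TLS stack.)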
+import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; +import java.security.KeyStore; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.concurrent.atomic.AtomicReference; + +@ApplicationScoped +public class PeerTrustManager implements X509TrustManager { + private final AtomicReference trustManager = new AtomicReference<>(); + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { + trustManager.get().checkClientTrusted(chain, authType); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { + trustManager.get().checkServerTrusted(chain, authType); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return trustManager.get().getAcceptedIssuers(); + } + + public synchronized void reloadTrustManagerHosts(Collection hosts) { + try { + Log.info("Trying to reload trust manager: " + hosts.size() + " known hosts"); + reloadTrustManager(hosts.stream().map(hostInfo -> + Pair.of(hostInfo.id().toString(), hostInfo.parsedCert())).toList()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private synchronized void reloadTrustManager(Collection> certs) throws Exception { + KeyStore ts = KeyStore.getInstance(KeyStore.getDefaultType()); + ts.load(null, null); + + for (var cert : certs) { + ts.setCertificateEntry(cert.getLeft(), cert.getRight()); + } + + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + + TrustManager[] tms = tmf.getTrustManagers(); + for (var tm : tms) { + if (tm instanceof X509TrustManager) { + trustManager.set((X509TrustManager) tm); + return; + } + } + + throw new NoSuchAlgorithmException("No X509TrustManager in TrustManagerFactory"); + } + +} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java new file mode 100644 index 00000000..167465f6 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java @@ -0,0 +1,44 @@ +package com.usatiuk.dhfs.objects.repository.peertrust; + + +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import io.quarkus.vertx.http.HttpServerOptionsCustomizer; +import io.vertx.core.http.HttpServerOptions; +import io.vertx.core.net.KeyCertOptions; +import io.vertx.core.net.TrustOptions; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import javax.net.ssl.KeyManagerFactory; +import java.security.KeyStore; +import java.security.cert.Certificate; + +@ApplicationScoped +public class PeerTrustServerCustomizer implements HttpServerOptionsCustomizer { + + @Inject + PeerTrustManager peerTrustManager; + + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Override + public void customizeHttpsServer(HttpServerOptions options) { + try { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null, null); + + ks.setKeyEntry("sslkey", + persistentPeerDataService.getSelfKeypair().getPrivate(), null, + new Certificate[]{persistentPeerDataService.getSelfCertificate()}); + + KeyManagerFactory keyManagerFactory = 
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(ks, null); + + options.setKeyCertOptions(KeyCertOptions.wrap(keyManagerFactory)); + options.setTrustOptions(TrustOptions.wrap(peerTrustManager)); + } catch (Exception e) { + throw new RuntimeException("Error configuring https: ", e); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java new file mode 100644 index 00000000..0dbf2687 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record AvailablePeerInfo(String uuid) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java new file mode 100644 index 00000000..2d646474 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerDelete(String uuid) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java new file mode 100644 index 00000000..5fbd9eb7 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerInfo(String uuid) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java new file mode 100644 index 00000000..f1e109f8 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +public record KnownPeerPut(String uuid) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java new file mode 100644 index 00000000..344ef33f --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java @@ -0,0 +1,45 @@ +package com.usatiuk.dhfs.objects.repository.webapi; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.PeerManager; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import jakarta.inject.Inject; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; + +import java.util.Collection; +import java.util.List; + +@Path("/objects-manage") +public class ManagementApi { + @Inject + PeerInfoService peerInfoService; + @Inject + PeerManager peerManager; + + @Path("known-peers") + @GET + public List knownPeers() { + return peerInfoService.getPeers().stream().map(peerInfo -> new KnownPeerInfo(peerInfo.id().toString())).toList(); + } + + @Path("known-peers") + @PUT + public void addPeer(KnownPeerPut knownPeerPut) { + 
peerManager.addRemoteHost(PeerId.of(knownPeerPut.uuid())); + } + + @Path("known-peers") + @DELETE + public void deletePeer(KnownPeerDelete knownPeerDelete) { +// peerManager.removeRemoteHost(PeerId.of(knownPeerPut.uuid())); + } + + @Path("available-peers") + @GET + public Collection availablePeers() { + return peerManager.getSeenButNotAddedHosts(); + } +} diff --git a/dhfs-parent/server/src/main/resources/application.properties b/dhfs-parent/server/src/main/resources/application.properties index aacd8c29..bbf3bab4 100644 --- a/dhfs-parent/server/src/main/resources/application.properties +++ b/dhfs-parent/server/src/main/resources/application.properties @@ -1,6 +1,7 @@ quarkus.grpc.server.use-separate-server=false dhfs.objects.peerdiscovery.port=42069 -dhfs.objects.peerdiscovery.interval=5000 +dhfs.objects.peerdiscovery.interval=5s +dhfs.objects.peerdiscovery.broadcast=true dhfs.objects.sync.timeout=30 dhfs.objects.sync.ping.timeout=5 dhfs.objects.invalidation.threads=4 @@ -31,6 +32,7 @@ dhfs.objects.ref-processor.threads=4 dhfs.objects.opsender.batch-size=100 dhfs.objects.lock_timeout_secs=2 dhfs.local-discovery=true +dhfs.peerdiscovery.timeout=5000 quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE quarkus.log.category."com.usatiuk.dhfs".level=TRACE quarkus.http.insecure-requests=enabled diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java index 8b075b57..e5a9f59b 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TempDataProfile.java @@ -22,7 +22,6 @@ abstract public class TempDataProfile implements QuarkusTestProfile { } var ret = new HashMap(); ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); - ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString()); ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); getConfigOverrides(ret); return ret; diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java index 2a6979a6..b3659d01 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java @@ -17,13 +17,10 @@ import java.util.Objects; public class TestDataCleaner { @ConfigProperty(name = "dhfs.objects.persistence.files.root") String tempDirectory; - @ConfigProperty(name = "dhfs.objects.root") - String tempDirectoryIdx; void init(@Observes @Priority(1) StartupEvent event) throws IOException { try { purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); } catch (Exception ignored) { Log.warn("Couldn't cleanup test data on init"); } @@ -31,7 +28,6 @@ public class TestDataCleaner { void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); } void purgeDirectory(File dir) { diff --git a/webui/src/api/dto.ts b/webui/src/api/dto.ts index ac35aa0a..dd90c71a 100644 --- a/webui/src/api/dto.ts +++ b/webui/src/api/dto.ts @@ -39,8 +39,8 @@ export type TTokenToResp = z.infer; // AvailablePeerInfo export const AvailablePeerInfoTo = z.object({ uuid: z.string(), - addr: z.string(), - port: z.number(), + // addr: z.string(), + 
// port: z.number(),
 });
 export type TAvailablePeerInfoTo = z.infer<typeof AvailablePeerInfoTo>;

From 7284fe91e5412ab7fead8cc433039d43239ef54a Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Mon, 3 Feb 2025 21:21:14 +0100
Subject: [PATCH 052/105] fix tx hooks not quite working

---
 .../usatiuk/dhfs/objects/JObjectManager.java | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index c3c2df63..57b6fd39 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -30,6 +30,7 @@ public class JObjectManager {
     WritebackObjectPersistentStore writebackObjectPersistentStore;
     @Inject
     TransactionFactory transactionFactory;
+
    JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
        _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList();
    }
@@ -131,20 +132,21 @@ public class JObjectManager {
             boolean somethingChanged;
             do {
                 somethingChanged = false;
+                Map<JObjectKey, TxRecord.TxObjectRecord<?>> currentIteration = new HashMap<>();
                 for (var hook : _preCommitTxHooks) {
-                    drained = tx.drainNewWrites();
-                    Log.trace("Commit iteration with " + drained.size() + " records for hook " + hook.getClass());
+                    for (var n : tx.drainNewWrites())
+                        currentIteration.put(n.key(), n);
+                    Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass());

-                    drained.stream()
-                            .map(TxRecord.TxObjectRecord::key)
+                    currentIteration.keySet().stream()
                             .sorted(Comparator.comparing(JObjectKey::toString))
                             .forEach(addDependency);

-                    for (var entry : drained) {
+                    for (var entry : currentIteration.entrySet()) {
                         somethingChanged = true;
                         Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString());
-                        var oldObj = getCurrent.apply(entry.key());
-                        switch (entry) {
+                        var oldObj = getCurrent.apply(entry.getKey());
+                        switch (entry.getValue()) {
                             case TxRecord.TxObjectRecordWrite<?> write -> {
                                 if (oldObj == null) {
                                     hook.onCreate(write.key(), write.data());
@@ -157,9 +159,9 @@ public class JObjectManager {
                             }
                             default -> throw new TxCommitException("Unexpected value: " + entry);
                         }
-                        current.put(entry.key(), entry);
                     }
                 }
+                current.putAll(currentIteration);
             } while (somethingChanged);
         }
         reads = tx.reads();

From 6c93504b2cdb5aa19c4b5b752a36917a00f2b8fa Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Mon, 3 Feb 2025 21:27:19 +0100
Subject: [PATCH 053/105] autoprotomap: record support

---
 .../deployment/ProtoSerializerGenerator.java  |  9 +++++++--
 .../autoprotomap/it/InterfaceObject.java      |  8 ++++++++
 .../usatiuk/autoprotomap/it/RecordObject.java |  7 +++++++
 .../autoprotomap/it/RecordObject2.java        |  7 +++++++
 .../src/main/proto/autoprotomap_test.proto    | 16 ++++++++++++++++
 .../it/AutoprotomapResourceTest.java          | 19 ++++++++++++++++++-
 6 files changed, 63 insertions(+), 3 deletions(-)
 create mode 100644 dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java
 create mode 100644 dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java
 create mode 100644 dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java

diff --git a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java
b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java index 386f79f1..6ed94f3a 100644 --- a/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java +++ b/dhfs-parent/autoprotomap/deployment/src/main/java/com/usatiuk/autoprotomap/deployment/ProtoSerializerGenerator.java @@ -14,6 +14,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.Objects; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -61,7 +62,7 @@ public class ProtoSerializerGenerator { visitor.accept(cur); var next = cur.superClassType().name(); - if (next.equals(DotName.OBJECT_NAME)) break; + if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break; cur = index.getClassByName(next); } } @@ -82,6 +83,10 @@ public class ProtoSerializerGenerator { var objectClass = index.getClassByName(objectType.name().toString()); + Function getterGetter = objectClass.isRecord() + ? Function.identity() + : s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX)); + for (var f : findAllFields(index, objectClass)) { var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX); @@ -89,7 +94,7 @@ public class ProtoSerializerGenerator { if ((f.flags() & Opcodes.ACC_PUBLIC) != 0) return bytecodeCreator.readInstanceField(f, object); else { - var fieldGetter = "get" + capitalize(stripPrefix(f.name(), FIELD_PREFIX)); + var fieldGetter = getterGetter.apply(f.name()); return bytecodeCreator.invokeVirtualMethod( MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object); } diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java new file mode 100644 index 00000000..7b06b316 --- /dev/null +++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/InterfaceObject.java @@ -0,0 +1,8 @@ +package com.usatiuk.autoprotomap.it; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; + +@ProtoMirror(InterfaceObjectProto.class) +public interface InterfaceObject { + String key(); +} diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java new file mode 100644 index 00000000..b314ca9a --- /dev/null +++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject.java @@ -0,0 +1,7 @@ +package com.usatiuk.autoprotomap.it; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; + +@ProtoMirror(RecordObjectProto.class) +public record RecordObject(String key) implements InterfaceObject { +} diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java new file mode 100644 index 00000000..4c66dfc3 --- /dev/null +++ b/dhfs-parent/autoprotomap/integration-tests/src/main/java/com/usatiuk/autoprotomap/it/RecordObject2.java @@ -0,0 +1,7 @@ +package com.usatiuk.autoprotomap.it; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; + +@ProtoMirror(RecordObject2Proto.class) +public record 
RecordObject2(String key, int value) implements InterfaceObject { +} diff --git a/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto b/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto index f606b3b4..c60bcec7 100644 --- a/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto +++ b/dhfs-parent/autoprotomap/integration-tests/src/main/proto/autoprotomap_test.proto @@ -28,4 +28,20 @@ message AbstractProto { SimpleObjectProto simpleObject = 2; CustomObjectProto customObject = 3; } +} + +message RecordObjectProto { + string key = 1; +} + +message RecordObject2Proto { + string key = 1; + int32 value = 2; +} + +message InterfaceObjectProto { + oneof obj { + RecordObjectProto recordObject = 1; + RecordObject2Proto recordObject2 = 2; + } } \ No newline at end of file diff --git a/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java b/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java index 2d02ffd3..36f63bf6 100644 --- a/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java +++ b/dhfs-parent/autoprotomap/integration-tests/src/test/java/com/usatiuk/autoprotomap/it/AutoprotomapResourceTest.java @@ -16,6 +16,8 @@ public class AutoprotomapResourceTest { ProtoSerializer nestedProtoSerializer; @Inject ProtoSerializer abstractProtoSerializer; + @Inject + ProtoSerializer interfaceProtoSerializer; @Test public void testSimple() { @@ -74,7 +76,7 @@ public class AutoprotomapResourceTest { } @Test - public void tesAbstractNested() { + public void testAbstractNested() { var ret = abstractProtoSerializer.serialize( new NestedObject( new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})), @@ -93,4 +95,19 @@ public class AutoprotomapResourceTest { Assertions.assertEquals("nested obj", des.get_nestedName()); Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes()); } + + @Test + public void testInterface() { + var ret = interfaceProtoSerializer.serialize(new RecordObject("record test")); + Assertions.assertEquals("record test", ret.getRecordObject().getKey()); + var des = (RecordObject) interfaceProtoSerializer.deserialize(ret); + Assertions.assertEquals("record test", des.key()); + + var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234)); + Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey()); + Assertions.assertEquals(1234, ret2.getRecordObject2().getValue()); + var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2); + Assertions.assertEquals("record test 2", des2.key()); + Assertions.assertEquals(1234, des2.value()); + } } From 7c06241876f2ebed8ccfb0f3df9b4644ef5daa2e Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 3 Feb 2025 22:02:05 +0100 Subject: [PATCH 054/105] dump some server changes --- .../usatiuk/dhfs/files/objects/ChunkData.java | 24 +- .../files/objects/ChunkDataSerializer.java | 22 ++ .../com/usatiuk/dhfs/files/objects/File.java | 34 +- .../dhfs/files/objects/FileSerializer.java | 44 +++ .../usatiuk/dhfs/files/objects/FsNode.java | 4 +- .../files/service/DhfsFileServiceImpl.java | 62 ++-- .../com/usatiuk/dhfs/objects/JDataRemote.java | 19 +- .../java/com/usatiuk/dhfs/objects/PeerId.java | 7 +- .../usatiuk/dhfs/objects/ReceivedObject.java | 6 + .../dhfs/objects/RefcounterTxHook.java | 26 +- 
.../dhfs/objects/RemoteObjPusherTxHook.java | 49 +++ .../usatiuk/dhfs/objects/RemoteObject.java | 80 ++--- .../dhfs/objects/RemoteObjectMeta.java | 53 ++++ .../dhfs/objects/RemoteTransaction.java | 91 +++++- .../jkleppmanntree/JKleppmannTreeManager.java | 203 ++++++------ .../JKleppmannTreeOpWrapper.java | 18 +- .../JKleppmannTreePeerInterface.java | 20 +- .../JKleppmannTreePeriodicPushOp.java | 8 +- .../structs/JKleppmannTreeNode.java | 8 +- .../structs/JKleppmannTreePersistentData.java | 37 +-- .../dhfs/objects/repository/PeerManager.java | 33 +- .../repository/ReceivedObjectSerializer.java | 46 +++ .../repository/RemoteObjectServiceClient.java | 167 +++++----- .../repository/RemoteObjectServiceServer.java | 174 +++++------ .../objects/repository/RpcClientFactory.java | 4 +- .../dhfs/objects/repository/SyncHandler.java | 290 ++++++++++-------- .../DeferredInvalidationQueueData.java | 17 + .../DeferredInvalidationQueueService.java | 85 +++++ .../invalidation/IndexUpdateOp.java | 12 + .../invalidation/IndexUpdateOpSerializer.java | 36 +++ .../InvalidationQueueService.java | 190 ++++++++++++ .../JKleppmannTreeOpPTempSerializer.java | 22 ++ .../objects/repository/invalidation/Op.java | 8 + .../repository/invalidation/OpHandler.java | 27 ++ .../repository/invalidation/OpPusher.java | 52 ++++ .../invalidation/PushOpHandler.java | 25 ++ .../objects/repository/peersync/PeerInfo.java | 21 +- .../peersync/PeerInfoSerializer.java | 24 ++ .../repository/peersync/PeerInfoService.java | 7 +- .../src/main/proto/dhfs_objects_serial.proto | 36 ++- .../src/main/proto/dhfs_objects_sync.proto | 39 +-- .../src/main/resources/application.properties | 2 +- .../files/DhfsFileServiceSimpleTestImpl.java | 7 +- 43 files changed, 1484 insertions(+), 655 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java create mode 100644 
dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 99811ff6..0f1033c7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -1,27 +1,13 @@ package com.usatiuk.dhfs.files.objects; import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; -import org.pcollections.PCollection; -import org.pcollections.TreePSet; - -public record ChunkData(JObjectKey key, PCollection refsFrom, boolean frozen, - ByteString data) implements JDataRefcounted { - public ChunkData(JObjectKey key, ByteString data) { - this(key, TreePSet.empty(), false, data); - } - - @Override - public ChunkData withRefsFrom(PCollection refs) { - return new ChunkData(key, refs, frozen, data); - } - - @Override - public ChunkData withFrozen(boolean frozen) { - return new ChunkData(key, refsFrom, frozen, data); - } +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +//@ProtoMirror(ChunkDataP.class) +public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote { @Override public int estimateSize() { return data.size(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java new file mode 100644 index 00000000..f23a8da0 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java @@ -0,0 +1,22 @@ +package com.usatiuk.dhfs.files.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class ChunkDataSerializer implements ProtoSerializer { + @Override + public ChunkData deserialize(ChunkDataP message) { + return new ChunkData(JObjectKey.of(message.getName()), message.getData()); + } + + @Override + public ChunkDataP serialize(ChunkData object) { + return ChunkDataP.newBuilder() + .setName(object.key().toString()) + .setData(object.data()) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index d6012ef9..a1878128 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,52 +1,48 @@ package com.usatiuk.dhfs.files.objects; +import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JObjectKey; -import org.pcollections.PCollection; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; import org.pcollections.TreePMap; import java.util.Collection; import java.util.Set; -public record File(JObjectKey key, PCollection refsFrom, boolean frozen, - long mode, long cTime, long mTime, +//@ProtoMirror(ChunkDataP.class) +public record 
File(JObjectKey key, long mode, long cTime, long mTime, TreePMap chunks, boolean symlink, long size ) implements FsNode { - @Override - public File withRefsFrom(PCollection refs) { - return new File(key, refs, frozen, mode, cTime, mTime, chunks, symlink, size); - } - - @Override - public File withFrozen(boolean frozen) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); - } - public File withChunks(TreePMap chunks) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } public File withSymlink(boolean symlink) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } public File withSize(long size) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } public File withMode(long mode) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } public File withCTime(long cTime) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } public File withMTime(long mTime) { - return new File(key, refsFrom, frozen, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, chunks, symlink, size); } @Override public Collection collectRefsTo() { return Set.copyOf(chunks().values()); } + + @Override + public int estimateSize() { + return chunks.size() * 64; + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java new file mode 100644 index 00000000..d5550a7c --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java @@ -0,0 +1,44 @@ +package com.usatiuk.dhfs.files.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.FileP; +import jakarta.enterprise.context.ApplicationScoped; +import org.pcollections.TreePMap; + +@ApplicationScoped +public class FileSerializer implements ProtoSerializer { + @Override + public File deserialize(FileP message) { + TreePMap chunks = TreePMap.empty(); + for (var chunk : message.getChunksList()) { + chunks = chunks.plus(chunk.getStart(), JObjectKey.of(chunk.getId())); + } + var ret = new File(JObjectKey.of(message.getUuid()), + message.getMode(), + message.getCtime(), + message.getMtime(), + chunks, + message.getSymlink(), + message.getSize() + ); + return ret; + } + + @Override + public FileP serialize(File object) { + var builder = FileP.newBuilder() + .setUuid(object.key().toString()) + .setMode(object.mode()) + .setCtime(object.cTime()) + .setMtime(object.mTime()) + .setSymlink(object.symlink()) + .setSize(object.size()); + object.chunks().forEach((s, i) -> { + builder.addChunksBuilder() + .setStart(s) + .setId(i.toString()); + }); + return builder.build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java index 09b76015..a359d2b7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java @@ -1,8 +1,8 @@ package com.usatiuk.dhfs.files.objects; -import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.dhfs.objects.JDataRemote; -public interface FsNode extends JDataRefcounted { +public interface FsNode extends JDataRemote { long mode(); long cTime(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index e1056659..53963c00 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -4,9 +4,7 @@ import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; @@ -26,7 +24,6 @@ import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.pcollections.TreePMap; -import org.pcollections.TreePSet; import java.nio.charset.StandardCharsets; import java.nio.file.Path; @@ -38,6 +35,8 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Inject Transaction curTx; @Inject + RemoteTransaction remoteTx; + @Inject TransactionManager jObjectTxManager; @ConfigProperty(name = "dhfs.files.target_chunk_size") @@ -76,7 +75,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { private ChunkData createChunk(ByteString bytes) { var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes); - curTx.put(newChunk); + remoteTx.put(newChunk); return newChunk; } @@ -105,8 +104,13 @@ public class DhfsFileServiceImpl implements DhfsFileService { var ref = curTx.get(JData.class, uuid).orElse(null); if (ref == null) return Optional.empty(); GetattrRes ret; - if (ref instanceof File f) { - ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? GetattrType.SYMLINK : GetattrType.FILE); + if (ref instanceof RemoteObject r) { + var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null); + if (remote instanceof File f) { + ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ? 
GetattrType.SYMLINK : GetattrType.FILE); + } else { + throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Remote object is not a file: " + ref.key())); + } } else if (ref instanceof JKleppmannTreeNode) { ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); } else { @@ -152,8 +156,8 @@ var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(JObjectKey.of(fuuid.toString()), TreePSet.empty(), false, mode, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty(), false, 0); - curTx.put(f); + File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty(), false, 0); + remoteTx.put(f); try { getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); @@ -226,9 +230,14 @@ if (dent instanceof JKleppmannTreeNode) { return true; - } else if (dent instanceof File f) { - curTx.put(f.withMode(mode).withMTime(System.currentTimeMillis())); - return true; + } else if (dent instanceof RemoteObject) { + var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null); + if (remote instanceof File f) { + remoteTx.put(f.withMode(mode).withMTime(System.currentTimeMillis())); + return true; + } else { + throw new IllegalArgumentException(uuid + " is not a file"); + } } else { throw new IllegalArgumentException(uuid + " is not a file"); } @@ -255,7 +264,7 @@ if (offset < 0) throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - var file = curTx.get(File.class, fileUuid).orElse(null); + var file = remoteTx.getData(File.class, fileUuid).orElse(null); if (file == null) { Log.error("File not found when trying to read: " + fileUuid); return Optional.empty(); @@ -315,7 +324,7 @@ } private ByteString readChunk(JObjectKey uuid) { - var chunkRead = curTx.get(ChunkData.class, uuid).orElse(null); + var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null); if (chunkRead == null) { Log.error("Chunk requested not found: " + uuid); @@ -354,7 +363,7 @@ throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); // FIXME: - var file = curTx.get(File.class, fileUuid, LockingStrategy.WRITE).orElse(null); + var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null); if (file == null) { Log.error("File not found when trying to write: " + fileUuid); return -1L; @@ -367,7 +376,7 @@ if (size(fileUuid) < offset) { truncate(fileUuid, offset); - file = curTx.get(File.class, fileUuid).orElse(null); + file = remoteTx.getData(File.class, fileUuid).orElse(null); } var chunksAll = file.chunks(); @@ -493,7 +502,7 @@ } file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); - curTx.put(file); + remoteTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -507,7 +516,7 @@ if (length < 0) throw new 
StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - var file = curTx.get(File.class, fileUuid).orElse(null); + var file = remoteTx.getData(File.class, fileUuid).orElse(null); if (file == null) { Log.error("File not found when trying to write: " + fileUuid); return false; @@ -517,7 +526,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var oldChunks = file.chunks(); file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); - curTx.put(file); + remoteTx.put(file); cleanupChunks(file, oldChunks.values()); updateFileSize(file); return true; @@ -578,7 +587,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); - curTx.put(file); + remoteTx.put(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); return true; @@ -595,7 +604,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public ByteString readlinkBS(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var fileOpt = curTx.get(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); + var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); return read(uuid, 0, Math.toIntExact(size(uuid))).get(); }); } @@ -614,8 +623,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { Log.debug("Creating file " + fuuid); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); - File f = new File(JObjectKey.of(fuuid.toString()), TreePSet.empty(), - false, 0, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty().plus(0L, newChunkData.key()), true, 0); + File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty().plus(0L, newChunkData.key()), true, 0); updateFileSize(f); @@ -627,12 +635,12 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) { return jObjectTxManager.executeTx(() -> { - var file = curTx.get(File.class, fileUuid).orElseThrow( + var file = remoteTx.getData(File.class, fileUuid).orElseThrow( () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription( "File not found for setTimes: " + fileUuid)) ); - curTx.put(file.withCTime(atimeMs).withMTime(mtimeMs)); + remoteTx.put(file.withCTime(atimeMs).withMTime(mtimeMs)); return true; }); } @@ -649,7 +657,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (realSize != file.size()) { - curTx.put(file.withSize(realSize)); + remoteTx.put(file.withSize(realSize)); } }); } @@ -657,7 +665,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Override public Long size(JObjectKey uuid) { return jObjectTxManager.executeTx(() -> { - var read = curTx.get(File.class, uuid) + var read = remoteTx.getData(File.class, uuid) .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); return read.size(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java index 4386f03e..531fe8ad 100644 --- 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java @@ -1,4 +1,21 @@ package com.usatiuk.dhfs.objects; -public interface JDataRemote { +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.persistence.RemoteObjectP; + +import java.io.Serializable; +import java.util.Collection; +import java.util.List; + +@ProtoMirror(RemoteObjectP.class) +public interface JDataRemote extends Serializable { + JObjectKey key(); + + default int estimateSize() { + return 100; + } + + default Collection collectRefsTo() { + return List.of(); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java index 5c34de0e..a85ae068 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects; import java.io.Serializable; import java.util.UUID; -public record PeerId(UUID id) implements Serializable { +public record PeerId(UUID id) implements Serializable, Comparable { public static PeerId of(UUID id) { return new PeerId(id); } @@ -20,4 +20,9 @@ public record PeerId(UUID id) implements Serializable { public JObjectKey toJObjectKey() { return JObjectKey.of(id.toString()); } + + @Override + public int compareTo(PeerId o) { + return id.compareTo(o.id); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java new file mode 100644 index 00000000..30e92654 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java @@ -0,0 +1,6 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.PMap; + +public record ReceivedObject(JObjectKey key, PMap changelog, JDataRemote data) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index e4f945c7..e239b8f2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -1,5 +1,8 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -9,6 +12,21 @@ public class RefcounterTxHook implements PreCommitTxHook { @Inject Transaction curTx; + private JDataRefcounted getRef(JDataRefcounted cur, JObjectKey key) { + var found = curTx.get(JDataRefcounted.class, key).orElse(null); + + if (found != null) { + return found; + } + + if (cur instanceof RemoteObject || cur instanceof JKleppmannTreeNode) { + return new RemoteObject<>(key); + } else { + return found; + } + + } + @Override public void onChange(JObjectKey key, JData old, JData cur) { if (!(cur instanceof JDataRefcounted refCur)) { @@ -21,14 +39,14 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var curRef : curRefs) { if (!oldRefs.contains(curRef)) { - var referenced = 
curTx.get(JDataRefcounted.class, curRef).orElse(null); + var referenced = getRef(refCur, curRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); } } for (var oldRef : oldRefs) { if (!curRefs.contains(oldRef)) { - var referenced = curTx.get(JDataRefcounted.class, oldRef).orElse(null); + var referenced = getRef(refCur, oldRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); } } @@ -41,7 +59,7 @@ } for (var newRef : refCur.collectRefsTo()) { - var referenced = curTx.get(JDataRefcounted.class, newRef).orElse(null); + var referenced = getRef(refCur, newRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); } } @@ -53,7 +71,7 @@ } for (var removedRef : refCur.collectRefsTo()) { - var referenced = curTx.get(JDataRefcounted.class, removedRef).orElse(null); + var referenced = getRef(refCur, removedRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java new file mode 100644 index 00000000..e83bc163 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java @@ -0,0 +1,49 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class RemoteObjPusherTxHook implements PreCommitTxHook { + @Inject + Transaction curTx; + @Inject + InvalidationQueueService invalidationQueueService; + + @Override + public void onChange(JObjectKey key, JData old, JData cur) { + boolean invalidate = switch (cur) { + case RemoteObject remote -> !remote.meta().changelog().equals(((RemoteObject) old).meta().changelog()); + case JKleppmannTreePersistentData pd -> !pd.queues().equals(((JKleppmannTreePersistentData) old).queues()); + default -> false; + }; + + if (invalidate) { + invalidationQueueService.pushInvalidationToAll(cur.key()); + } + } + + @Override + public void onCreate(JObjectKey key, JData cur) { + if (!(cur instanceof RemoteObject remote)) { + return; + } + + invalidationQueueService.pushInvalidationToAll(remote.key()); + } + + @Override + public void onDelete(JObjectKey key, JData cur) { + if (!(cur instanceof RemoteObject remote)) { + return; + } + // TODO: deletions of remote objects are not yet propagated to peers here + } + + @Override + public int getPriority() { + return 100; + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java index 46719854..a965c1ba 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java @@ -1,67 +1,71 @@ package com.usatiuk.dhfs.objects; +import org.pcollections.HashTreePSet; import org.pcollections.PCollection; import org.pcollections.PMap; -import org.pcollections.PSet; +import org.pcollections.TreePMap; +import javax.annotation.Nullable; import java.util.Collection; import java.util.List; -public record RemoteObject( - JObjectKey key, PCollection refsFrom, boolean frozen, - PMap 
knownRemoteVersions, - Class knownType, - PSet confirmedDeletes, - boolean seen, - PMap changelog, - boolean haveLocal -) implements JDataRefcounted { +public record RemoteObject(PCollection refsFrom, boolean frozen, + RemoteObjectMeta meta, @Nullable T data) implements JDataRefcounted { + public RemoteObject(T data, PeerId initialPeer) { + this(HashTreePSet.empty(), false, new RemoteObjectMeta(data.key(), data.getClass(), initialPeer), data); + } + + public RemoteObject(JObjectKey key, PMap remoteChangelog) { + this(HashTreePSet.empty(), false, new RemoteObjectMeta(key, remoteChangelog), null); + } + + public RemoteObject(JObjectKey key) { + this(HashTreePSet.empty(), false, new RemoteObjectMeta(key, TreePMap.empty()), null); + } + + @Override + public JObjectKey key() { + if (data != null && !data.key().equals(meta.key())) + throw new IllegalStateException("Corrupted object, key mismatch: " + meta.key() + " vs " + data.key()); + return meta.key(); + } + @Override public RemoteObject withRefsFrom(PCollection refs) { - return new RemoteObject<>(key, refs, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + return new RemoteObject<>(refs, frozen, meta, data); } @Override public RemoteObject withFrozen(boolean frozen) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + return new RemoteObject<>(refsFrom, frozen, meta, data); } - public RemoteObject withKnownRemoteVersions(PMap knownRemoteVersions) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + public RemoteObject withMeta(RemoteObjectMeta meta) { + return new RemoteObject<>(refsFrom, frozen, meta, data); } - public RemoteObject withKnownType(Class knownType) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + public RemoteObject withData(T data) { + return new RemoteObject<>(refsFrom, frozen, meta, data); } - public RemoteObject withConfirmedDeletes(PSet confirmedDeletes) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); + public RemoteObject withRefsFrom(PCollection refs, boolean frozen) { + return new RemoteObject<>(refs, frozen, meta, data); } - public RemoteObject withSeen(boolean seen) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); - } - - public RemoteObject withChangelog(PMap changelog) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); - } - - public RemoteObject withHaveLocal(boolean haveLocal) { - return new RemoteObject<>(key, refsFrom, frozen, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); - } - - public static JObjectKey keyFrom(JObjectKey key) { - return new JObjectKey(key + "_remote"); - } - - public JObjectKey localKey() { - if (!haveLocal) throw new IllegalStateException("No local key"); - return JObjectKey.of(key.name().substring(0, key.name().length() - "_remote".length())); + public ReceivedObject toReceivedObject() { + if (data == null) + throw new IllegalStateException("Cannot convert to ReceivedObject without data: " + meta.key()); + return new ReceivedObject(meta.key(), meta.changelog(), data); } @Override public Collection collectRefsTo() { - if (haveLocal) 
return List.of(localKey()); + if (data != null) return data.collectRefsTo(); return List.of(); } + + @Override + public int estimateSize() { + return data == null ? 1000 : data.estimateSize(); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java new file mode 100644 index 00000000..2642525a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java @@ -0,0 +1,53 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.HashTreePMap; +import org.pcollections.HashTreePSet; +import org.pcollections.PMap; +import org.pcollections.PSet; + +import java.io.Serializable; + +public record RemoteObjectMeta( + JObjectKey key, + PMap knownRemoteVersions, + Class knownType, + PSet confirmedDeletes, + boolean seen, + PMap changelog) implements Serializable { + public RemoteObjectMeta(JObjectKey key, Class type, PeerId initialPeer) { + this(key, HashTreePMap.empty(), type, HashTreePSet.empty(), true, + HashTreePMap.empty().plus(initialPeer, 1L)); + } + + public RemoteObjectMeta(JObjectKey key, PMap remoteChangelog) { + this(key, HashTreePMap.empty(), JDataRemote.class, HashTreePSet.empty(), true, remoteChangelog); + } + + public RemoteObjectMeta withKnownRemoteVersions(PMap knownRemoteVersions) { + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public RemoteObjectMeta withKnownType(Class knownType) { + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public RemoteObjectMeta withConfirmedDeletes(PSet confirmedDeletes) { + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public RemoteObjectMeta withSeen(boolean seen) { + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public RemoteObjectMeta withChangelog(PMap changelog) { + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public RemoteObjectMeta withHaveLocal(boolean haveLocal) { + // FIXME: no-op left over from the old RemoteObject record; "have local" is now implied by RemoteObject.data() being non-null + return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + } + + public long versionSum() { + return changelog.values().stream().mapToLong(Long::longValue).sum(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java index b5086e4a..e7187193 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -1,10 +1,13 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.repository.SyncHandler; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; +import org.apache.commons.lang3.mutable.MutableObject; import java.util.Optional; @@ -12,20 +15,96 @@ public class RemoteTransaction { @Inject Transaction curTx; + @Inject + 
RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + SyncHandler syncHandler; + @Inject + PersistentPeerDataService persistentPeerDataService; public long getId() { return curTx.getId(); } - public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { - throw new NotImplementedException(); + private RemoteObject tryDownloadRemote(RemoteObject obj) { + MutableObject> success = new MutableObject<>(null); + + remoteObjectServiceClient.getObject(obj.key(), rcv -> { + if (!obj.meta().knownType().isInstance(rcv.getRight().data())) + throw new IllegalStateException("Object type mismatch: " + obj.meta().knownType() + " vs " + rcv.getRight().data().getClass()); + + if (!rcv.getRight().changelog().equals(obj.meta().changelog())) { + var updated = syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), obj, rcv.getRight().changelog()); + if (!rcv.getRight().changelog().equals(updated.meta().changelog())) + throw new IllegalStateException("Changelog mismatch, update failed?: " + rcv.getRight().changelog() + " vs " + updated.meta().changelog()); + success.setValue(updated.withData((T) rcv.getRight().data())); + } else { + success.setValue(obj.withData((T) rcv.getRight().data())); + } + return true; + }); + + curTx.put(success.getValue()); + return success.getValue(); } - public void put(JData obj) { - throw new NotImplementedException(); + @SuppressWarnings("unchecked") + public Optional> get(Class type, JObjectKey key, LockingStrategy strategy) { + return curTx.get(RemoteObject.class, key, strategy) + .map(obj -> { + if (obj.data() != null && !type.isInstance(obj.data())) + throw new IllegalStateException("Object (real) type mismatch: " + obj.data().getClass() + " vs " + type); + if (!type.isAssignableFrom(obj.meta().knownType())) + throw new IllegalStateException("Object (meta) type mismatch: " + obj.meta().knownType() + " vs " + type); + + if (obj.data() != null) + return obj; + else + return tryDownloadRemote(obj); + }); } - public Optional get(Class type, JObjectKey key) { + public Optional getMeta(JObjectKey key, LockingStrategy strategy) { + return curTx.get(RemoteObject.class, key, strategy).map(obj -> obj.meta()); + } + + public Optional getData(Class type, JObjectKey key, LockingStrategy strategy) { + return get(type, key, strategy).map(RemoteObject::data); + } + + + public void put(RemoteObject obj) { + curTx.put(obj); + } + + public void put(T obj) { + var cur = get((Class) obj.getClass(), obj.key()).orElse(null); + + if (cur == null) { + curTx.put(new RemoteObject<>(obj, persistentPeerDataService.getSelfUuid())); + return; + } + + if (cur.data() != null && cur.data().equals(obj)) + return; + if (cur.data() != null && !cur.data().getClass().equals(obj.getClass())) + throw new IllegalStateException("Object type mismatch: " + cur.data().getClass() + " vs " + obj.getClass()); + var newMeta = cur.meta(); + newMeta = newMeta.withChangelog(newMeta.changelog().plus(persistentPeerDataService.getSelfUuid(), + newMeta.changelog().get(persistentPeerDataService.getSelfUuid()) + 1)); + var newObj = cur.withData(obj).withMeta(newMeta); + curTx.put(newObj); + } + + public Optional> get(Class type, JObjectKey key) { return get(type, key, LockingStrategy.OPTIMISTIC); } + + public Optional getMeta(JObjectKey key) { + return getMeta(key, LockingStrategy.OPTIMISTIC); + } + + public Optional getData(Class type, JObjectKey key) { + return getData(type, key, LockingStrategy.OPTIMISTIC); + } } diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index ecdc816c..cfcf5036 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -1,36 +1,39 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.*; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; +import org.pcollections.HashTreePMap; +import org.pcollections.TreePMap; import org.pcollections.TreePSet; -import java.util.HashMap; -import java.util.List; -import java.util.TreeMap; -import java.util.UUID; +import java.util.*; import java.util.function.Function; @ApplicationScoped public class JKleppmannTreeManager { private static final String dataFileName = "trees"; @Inject - JKleppmannTreePeerInterface jKleppmannTreePeerInterface; - @Inject Transaction curTx; @Inject TransactionManager txManager; @Inject JKleppmannTreePeerInterface peerInterface; + @Inject + PeerInfoService peerInfoService; public JKleppmannTree getTree(JObjectKey name) { return txManager.executeTx(() -> { @@ -41,7 +44,7 @@ public class JKleppmannTreeManager { TreePSet.empty(), true, 1L, - new HashMap<>(), + HashTreePMap.empty(), new HashMap<>(), new TreeMap<>() ); @@ -57,7 +60,7 @@ public class JKleppmannTreeManager { } public class JKleppmannTree { - private final KleppmannTree _tree; + private final KleppmannTree _tree; private final JKleppmannTreeStorageInterface _storageInterface; private final JKleppmannTreeClock _clock; private final JObjectKey _treeName; @@ -89,105 +92,71 @@ public class JKleppmannTreeManager { _tree.move(_storageInterface.getTrashId(), newMeta.withName(nodeKey.toString()), nodeKey); } -// @Override -// public boolean hasPendingOpsForHost(UUID host) { -// return _persistentData.get() -// .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, -// (m, d) -> d.getQueues().containsKey(host) && -// !d.getQueues().get(host).isEmpty() -// ); -// } -// -// @Override -// public List getPendingOpsForHost(UUID host, int limit) { -// return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { -// if (d.getQueues().containsKey(host)) { -// var queue = d.getQueues().get(host); -// ArrayList collected = new ArrayList<>(); -// -// for (var node : queue.entrySet()) { -// collected.add(new JKleppmannTreeOpWrapper(node.getValue())); -// if (collected.size() >= limit) break; -// } -// -// return collected; -// } -// return List.of(); -// }); -// } + public boolean 
hasPendingOpsForHost(PeerId host) { + return !_data.queues().getOrDefault(host, TreePMap.empty()).isEmpty(); + } -// @Override -// public String getId() { -// return _treeName; -// } + public List getPendingOpsForHost(PeerId host, int limit) { + ArrayList collected = new ArrayList<>(); + for (var node : _data.queues().getOrDefault(host, TreePMap.empty()).entrySet()) { + collected.add(new JKleppmannTreeOpWrapper(_data.key(), node.getValue())); + if (collected.size() >= limit) break; + } + return Collections.unmodifiableList(collected); + } -// @Override -// public void commitOpForHost(UUID host, Op op) { -// if (!(op instanceof JKleppmannTreeOpWrapper jop)) -// throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); -// _persistentData.get().assertRwLock(); -// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -// -// var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); -// if (!Objects.equals(jop.getOp(), got)) -// throw new IllegalArgumentException("Committed op push was not the oldest"); -// -// _persistentData.get().mutate(new JMutator() { -// @Override -// public boolean mutate(JKleppmannTreePersistentData object) { -// object.getQueues().get(host).pollFirstEntry(); -// return true; -// } -// -// @Override -// public void revert(JKleppmannTreePersistentData object) { -// object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); -// } -// }); -// -// } + // @Override + public void commitOpForHost(PeerId host, Op op) { + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass()); -// @Override -// public void pushBootstrap(UUID host) { -// _tree.recordBoostrapFor(host); -// } + var firstOp = _data.queues().get(host).firstEntry().getValue(); + if (!Objects.equals(firstOp, jop.op())) + throw new IllegalArgumentException("Committed op push was not the oldest"); - public Pair findParent(Function, Boolean> predicate) { - return _tree.findParent(predicate); + _data = _data.withQueues(_data.queues().plus(host, _data.queues().get(host).minus(_data.queues().get(host).firstKey()))); } // @Override -// public boolean acceptExternalOp(UUID from, Op op) { -// if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { -// return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); -// } -// -// if (!(op instanceof JKleppmannTreeOpWrapper jop)) -// throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); -// -// JObject fileRef; -// if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { +// public void pushBootstrap(PeerId host) { +// _tree.recordBoostrapFor(host); +// } + + public Pair findParent(Function, Boolean> predicate) { + return _tree.findParent(predicate); + } + + // @Override + public boolean acceptExternalOp(PeerId from, Op op) { + if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { + return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); + } + + if (!(op instanceof JKleppmannTreeOpWrapper jop)) + throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass()); + +// if (jop.op().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { // var fino = f.getFileIno(); // fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); // } else { // fileRef = null; // } -// -// if 
(Log.isTraceEnabled()) -// Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); -// -// try { -// _tree.applyExternalOp(from, jop.getOp()); -// } catch (Exception e) { -// Log.error("Error applying external op", e); -// throw e; -// } finally { -// // FIXME: -// // Fixup the ref if it didn't really get applied -// + + if (Log.isTraceEnabled()) + Log.trace("Received op from " + from + ": " + jop.op().timestamp().timestamp() + " " + jop.op().childId() + "->" + jop.op().newParentId() + " as " + jop.op().newMeta().getName()); + + try { + _tree.applyExternalOp(from, jop.op()); + } catch (Exception e) { + Log.error("Error applying external op", e); + throw e; + } finally { + // FIXME: + // Fixup the ref if it didn't really get applied + // if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) // Log.error("Could not create child of pushed op: " + jop.getOp()); -// + // if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { // if (fileRef != null) { // var got = jObjectManager.get(jop.getOp().childId()).orElse(null); @@ -216,9 +185,9 @@ public class JKleppmannTreeManager { // } // } // } -// } -// return true; -// } + } + return true; + } // @Override // public Op getPeriodicPushOp() { @@ -232,9 +201,12 @@ public class JKleppmannTreeManager { // _persistentData.get().rwUnlock(); // } - private class JOpRecorder implements OpRecorder { + private class JOpRecorder implements OpRecorder { @Override - public void recordOp(OpMove op) { + public void recordOp(OpMove op) { + for (var p : peerInfoService.getPeersNoSelf()) { + recordOpForPeer(p.id(), op); + } // _persistentData.get().assertRwLock(); // _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); // var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); @@ -254,7 +226,8 @@ public class JKleppmannTreeManager { } @Override - public void recordOpForPeer(UUID peer, OpMove op) { + public void recordOpForPeer(PeerId peer, OpMove op) { + _data = _data.withQueues(_data.queues().plus(peer, _data.queues().getOrDefault(peer, TreePMap.empty()).plus(op.timestamp(), op))); // _persistentData.get().assertRwLock(); // _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); // _persistentData.get().mutate(new JMutator() { @@ -296,7 +269,7 @@ public class JKleppmannTreeManager { } } - public class JKleppmannTreeStorageInterface implements StorageInterface { + public class JKleppmannTreeStorageInterface implements StorageInterface { private final LogWrapper _logWrapper = new LogWrapper(); private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); @@ -330,7 +303,7 @@ public class JKleppmannTreeManager { } @Override - public void putNode(TreeNode node) { + public void putNode(TreeNode node) { curTx.put(((JKleppmannTreeNode) node)); } @@ -340,23 +313,23 @@ public class JKleppmannTreeManager { } @Override - public LogInterface getLog() { + public LogInterface getLog() { return _logWrapper; } @Override - public PeerTimestampLogInterface getPeerTimestampLog() { + public PeerTimestampLogInterface getPeerTimestampLog() { return _peerLogWrapper; } - private class PeerLogWrapper implements PeerTimestampLogInterface { + private class PeerLogWrapper implements PeerTimestampLogInterface { @Override - public Long getForPeer(UUID peerId) { + public Long getForPeer(PeerId peerId) { return 
_data.peerTimestampLog().get(peerId); } @Override - public void putForPeer(UUID peerId, Long timestamp) { + public void putForPeer(PeerId peerId, Long timestamp) { var newPeerTimestampLog = new HashMap<>(_data.peerTimestampLog()); newPeerTimestampLog.put(peerId, timestamp); _data = _data.withPeerTimestampLog(newPeerTimestampLog); @@ -364,16 +337,16 @@ public class JKleppmannTreeManager { } } - private class LogWrapper implements LogInterface { + private class LogWrapper implements LogInterface { @Override - public Pair, LogRecord> peekOldest() { + public Pair, LogRecord> peekOldest() { var ret = _data.log().firstEntry(); if (ret == null) return null; return Pair.of(ret); } @Override - public Pair, LogRecord> takeOldest() { + public Pair, LogRecord> takeOldest() { var newLog = new TreeMap<>(_data.log()); var ret = newLog.pollFirstEntry(); _data = _data.withLog(newLog); @@ -383,19 +356,19 @@ public class JKleppmannTreeManager { } @Override - public Pair, LogRecord> peekNewest() { + public Pair, LogRecord> peekNewest() { var ret = _data.log().lastEntry(); if (ret == null) return null; return Pair.of(ret); } @Override - public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { + public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { return _data.log().tailMap(since, inclusive).entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); } @Override - public List, LogRecord>> getAll() { + public List, LogRecord>> getAll() { return _data.log().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); } @@ -405,7 +378,7 @@ public class JKleppmannTreeManager { } @Override - public boolean containsKey(CombinedTimestamp timestamp) { + public boolean containsKey(CombinedTimestamp timestamp) { return _data.log().containsKey(timestamp); } @@ -415,7 +388,7 @@ public class JKleppmannTreeManager { } @Override - public void put(CombinedTimestamp timestamp, LogRecord record) { + public void put(CombinedTimestamp timestamp, LogRecord record) { if (_data.log().containsKey(timestamp)) throw new IllegalStateException("Overwriting log entry?"); var newLog = new TreeMap<>(_data.log()); @@ -425,7 +398,7 @@ public class JKleppmannTreeManager { } @Override - public void replace(CombinedTimestamp timestamp, LogRecord record) { + public void replace(CombinedTimestamp timestamp, LogRecord record) { var newLog = new TreeMap<>(_data.log()); newLog.put(timestamp, record); _data = _data.withLog(newLog); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index cf734a4e..209c43df 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -1,24 +1,16 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; import com.usatiuk.kleppmanntree.OpMove; -import java.util.UUID; +import java.io.Serializable; // Wrapper to avoid having to specify generic types -public class JKleppmannTreeOpWrapper { - private final OpMove _op; - - public JKleppmannTreeOpWrapper(OpMove op) { - if (op == null) throw new 
IllegalArgumentException("op shouldn't be null"); - _op = op; - } - - public OpMove getOp() { - return _op; - } - +public record JKleppmannTreeOpWrapper(JObjectKey treeName, + OpMove op) implements Op, Serializable { // @Override // public Collection getEscapedRefs() { // if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java index 9088ecfd..0ea613f7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java @@ -1,6 +1,11 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; import com.usatiuk.kleppmanntree.PeerInterface; +import jakarta.inject.Inject; import jakarta.inject.Singleton; import java.util.Collection; @@ -8,14 +13,19 @@ import java.util.List; import java.util.UUID; @Singleton -public class JKleppmannTreePeerInterface implements PeerInterface { +public class JKleppmannTreePeerInterface implements PeerInterface { + @Inject + PeerInfoService peerInfoService; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Override - public UUID getSelfId() { - return UUID.nameUUIDFromBytes("1".getBytes()); + public PeerId getSelfId() { + return persistentPeerDataService.getSelfUuid(); } @Override - public Collection getAllPeers() { - return List.of(getSelfId()); + public Collection getAllPeers() { + return peerInfoService.getPeers().stream().map(PeerInfo::id).toList(); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java index f7526587..679e1249 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java @@ -1,17 +1,19 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; +import com.usatiuk.dhfs.objects.PeerId; + import java.util.UUID; public class JKleppmannTreePeriodicPushOp { - private final UUID _from; + private final PeerId _from; private final long _timestamp; - public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { + public JKleppmannTreePeriodicPushOp(PeerId from, long timestamp) { _from = from; _timestamp = timestamp; } - public UUID getFrom() { + public PeerId getFrom() { return _from; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 3b4a8687..1d1a4839 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; 
import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; @@ -12,15 +13,14 @@ import java.io.Serializable; import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; // FIXME: Ideally this is two classes? public record JKleppmannTreeNode(JObjectKey key, PCollection refsFrom, boolean frozen, JObjectKey parent, - OpMove lastEffectiveOp, + OpMove lastEffectiveOp, JKleppmannTreeNodeMeta meta, - Map children) implements TreeNode, JDataRefcounted, Serializable { + Map children) implements TreeNode, JDataRefcounted, Serializable { public JKleppmannTreeNode(JObjectKey id, JObjectKey parent, JKleppmannTreeNodeMeta meta) { this(id, TreePSet.empty(), false, parent, null, meta, Collections.emptyMap()); @@ -32,7 +32,7 @@ public record JKleppmannTreeNode(JObjectKey key, PCollection refsFro } @Override - public JKleppmannTreeNode withLastEffectiveOp(OpMove lastEffectiveOp) { + public JKleppmannTreeNode withLastEffectiveOp(OpMove lastEffectiveOp) { return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index 910a652b..440b38de 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -2,41 +2,24 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.dhfs.objects.JDataRefcounted; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.kleppmanntree.CombinedTimestamp; import com.usatiuk.kleppmanntree.LogRecord; import com.usatiuk.kleppmanntree.OpMove; import org.pcollections.PCollection; +import org.pcollections.PMap; +import org.pcollections.PSortedMap; +import org.pcollections.TreePMap; import java.util.*; public record JKleppmannTreePersistentData( JObjectKey key, PCollection refsFrom, boolean frozen, long clock, - HashMap, OpMove>> queues, - HashMap peerTimestampLog, - TreeMap, LogRecord> log + PMap, OpMove>> queues, + HashMap peerTimestampLog, + TreeMap, LogRecord> log ) implements JDataRefcounted { - void recordOp(UUID host, OpMove opMove) { - queues().computeIfAbsent(host, h -> new TreeMap<>()); - queues().get(host).put(opMove.timestamp(), opMove); - } - - void removeOp(UUID host, OpMove opMove) { - queues().get(host).remove(opMove.timestamp(), opMove); - } - - void recordOp(Collection hosts, OpMove opMove) { - for (var u : hosts) { - recordOp(u, opMove); - } - } - - void removeOp(Collection hosts, OpMove opMove) { - for (var u : hosts) { - removeOp(u, opMove); - } - } - @Override public JKleppmannTreePersistentData withRefsFrom(PCollection refs) { return new JKleppmannTreePersistentData(key, refs, frozen, clock, queues, peerTimestampLog, log); @@ -51,15 +34,15 @@ public record JKleppmannTreePersistentData( return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } - public JKleppmannTreePersistentData 
withQueues(HashMap, OpMove>> queues) { + public JKleppmannTreePersistentData withQueues(PMap, OpMove>> queues) { return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } - public JKleppmannTreePersistentData withPeerTimestampLog(HashMap peerTimestampLog) { + public JKleppmannTreePersistentData withPeerTimestampLog(HashMap peerTimestampLog) { return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } - public JKleppmannTreePersistentData withLog(TreeMap, LogRecord> log) { + public JKleppmannTreePersistentData withLog(TreeMap, LogRecord> log) { return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java index 81f3c764..eabd1b8c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -23,6 +23,7 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import java.io.IOException; import java.util.*; import java.util.concurrent.*; +import java.util.stream.Collectors; @ApplicationScoped public class PeerManager { @@ -135,7 +136,7 @@ public class PeerManager { // FIXME: private boolean pingCheck(PeerInfo host, PeerAddress address) { try { - return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, c -> { + return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, (peer, c) -> { var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); if (!UUID.fromString(ret.getSelfUuid()).equals(host.id().id())) { throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host.id()); @@ -148,8 +149,12 @@ public class PeerManager { } } + public boolean isReachable(PeerId host) { + return _states.containsKey(host); + } + public boolean isReachable(PeerInfo host) { - return _states.containsKey(host.id()); + return isReachable(host.id()); } public PeerAddress getAddress(PeerId host) { @@ -166,21 +171,13 @@ public class PeerManager { // .map(Map.Entry::getKey).toList()); // } -// public HostStateSnapshot getHostStateSnapshot() { -// ArrayList available = new ArrayList<>(); -// ArrayList unavailable = new ArrayList<>(); -// _transientPeersState.runReadLocked(d -> { -// for (var v : d.getStates().entrySet()) { -// if (v.getValue().isReachable()) -// available.add(v.getKey()); -// else -// unavailable.add(v.getKey()); -// } -// return null; -// } -// ); -// return new HostStateSnapshot(available, unavailable); -// } + public HostStateSnapshot getHostStateSnapshot() { + return transactionManager.run(() -> { + var partition = peerInfoService.getPeersNoSelf().stream().map(PeerInfo::id) + .collect(Collectors.partitioningBy(this::isReachable)); + return new HostStateSnapshot(partition.get(true), partition.get(false)); + }); + } // public void removeRemoteHost(UUID host) { // persistentPeerDataService.removeHost(host); @@ -227,7 +224,7 @@ public class PeerManager { void apply(UUID host); } - public record HostStateSnapshot(List available, List unavailable) { + public record HostStateSnapshot(Collection available, Collection unavailable) { } } diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java new file mode 100644 index 00000000..73ab19cd --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java @@ -0,0 +1,46 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.ReceivedObject; +import com.usatiuk.dhfs.objects.persistence.RemoteObjectP; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.pcollections.HashTreePMap; +import org.pcollections.PMap; + +@ApplicationScoped +public class ReceivedObjectSerializer implements ProtoSerializer { + @Inject + ProtoSerializer remoteObjectSerializer; + + @Override + public ReceivedObject deserialize(GetObjectReply message) { + PMap changelog = HashTreePMap.empty(); + for (var entry : message.getHeader().getChangelog().getEntriesList()) { + changelog = changelog.plus(PeerId.of(entry.getHost()), entry.getVersion()); + } + return new ReceivedObject( + JObjectKey.of(message.getHeader().getName()), + changelog, + remoteObjectSerializer.deserialize(message.getContent()) + ); + } + + @Override + public GetObjectReply serialize(ReceivedObject object) { + var builder = GetObjectReply.newBuilder(); + var headerBuilder = builder.getHeaderBuilder(); + headerBuilder.setName(object.key().toString()); + var changelogBuilder = headerBuilder.getChangelogBuilder(); + object.changelog().forEach((peer, version) -> { + changelogBuilder.addEntriesBuilder() + .setHost(peer.toString()) + .setVersion(version); + }); + builder.setContent(remoteObjectSerializer.serialize(object.data())); + return builder.build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java index 37458390..d591bcb7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -1,82 +1,84 @@ -//package com.usatiuk.dhfs.objects.repository; -// -//import com.google.common.collect.Maps; -//import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -//import com.usatiuk.dhfs.objects.jrepository.*; -//import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -//import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -//import com.usatiuk.dhfs.objects.repository.opsupport.Op; -//import io.grpc.Status; -//import io.grpc.StatusRuntimeException; -//import io.quarkus.logging.Log; -//import jakarta.enterprise.context.ApplicationScoped; -//import jakarta.inject.Inject; -//import org.apache.commons.lang3.tuple.Pair; -// -//import javax.annotation.Nullable; -//import java.util.*; -//import java.util.concurrent.Callable; -//import java.util.concurrent.ConcurrentLinkedDeque; -//import java.util.concurrent.Executors; -//import java.util.stream.Collectors; -// -//@ApplicationScoped -//public class RemoteObjectServiceClient { -// @Inject -// PersistentPeerDataService persistentPeerDataService; -// -// @Inject -// RpcClientFactory rpcClientFactory; -// 
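The ReceivedObjectSerializer above maps GetObjectReply messages to the ReceivedObject domain record and back, rebuilding the changelog as a persistent map. A serializer like this should round-trip; a minimal sketch, where `serializer` and the JDataRemote payload `data` are hypothetical stand-ins for the injected beans:

    // Round-trip check for the proto <-> domain mapping shown above.
    PMap<PeerId, Long> changelog = HashTreePMap.<PeerId, Long>empty()
            .plus(PeerId.of("peer-a"), 3L);
    ReceivedObject original = new ReceivedObject(JObjectKey.of("obj1"), changelog, data);
    GetObjectReply wire = serializer.serialize(original);   // domain -> proto
    ReceivedObject decoded = serializer.deserialize(wire);  // proto -> domain
    assert decoded.key().equals(original.key());
    assert decoded.changelog().equals(original.changelog());
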
-// @Inject -// JObjectManager jObjectManager; -// -// @Inject -// SyncHandler syncHandler; -// @Inject -// InvalidationQueueService invalidationQueueService; -// @Inject +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +@ApplicationScoped +public class RemoteObjectServiceClient { + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Inject + RpcClientFactory rpcClientFactory; + + @Inject + TransactionManager txm; + @Inject + Transaction curTx; + @Inject + RemoteTransaction remoteTx; + + @Inject + SyncHandler syncHandler; + @Inject + InvalidationQueueService invalidationQueueService; + // @Inject // ProtoSerializer dataProtoSerializer; -// @Inject -// ProtoSerializer opProtoSerializer; -// @Inject -// JObjectTxManager jObjectTxManager; -// + @Inject + ProtoSerializer opProtoSerializer; + + @Inject + ProtoSerializer receivedObjectProtoSerializer; + // public Pair getSpecificObject(UUID host, String name) { // return rpcClientFactory.withObjSyncClient(host, client -> { // var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); // return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); // }); // } -// -// public JObjectDataP getObject(JObject jObject) { -// jObject.assertRwLock(); -// -// var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> { -// var ourVersion = md.getOurVersion(); -// if (ourVersion >= 1) -// return md.getRemoteCopies().entrySet().stream() -// .filter(entry -> entry.getValue().equals(ourVersion)) -// .map(Map.Entry::getKey).toList(); -// else -// return persistentPeerDataService.getHostUuids(); -// }); -// -// if (targets.isEmpty()) -// throw new IllegalStateException("No targets for object " + jObject.getMeta().getName()); -// -// Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); -// -// return rpcClientFactory.withObjSyncClient(targets, client -> { -// var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build()); -// -// var receivedMap = new HashMap(); -// for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) { -// receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); -// } -// + + public void getObject(JObjectKey key, Function, Boolean> onReceive) { + var objMeta = remoteTx.getMeta(key).orElse(null); + + if (objMeta == null) { + throw new IllegalArgumentException("Object " + key + " not found"); + } + + var targetVersion = objMeta.versionSum(); + var targets = objMeta.knownRemoteVersions().entrySet().stream() + .filter(entry -> entry.getValue().equals(targetVersion)) + .map(Map.Entry::getKey).toList(); + + if (targets.isEmpty()) + throw new IllegalStateException("No 
targets for object " + key); + + Log.info("Downloading object " + key + " from " + targets); + + rpcClientFactory.withObjSyncClient(targets, (peer, client) -> { + var reply = client.getObject(GetObjectRequest.newBuilder().setName(key.toString()).build()); + + var deserialized = receivedObjectProtoSerializer.deserialize(reply); + + if (!onReceive.apply(Pair.of(peer, deserialized))) { + throw new StatusRuntimeException(Status.ABORTED.withDescription("Failed to process object " + key + " from " + peer)); + } + + return null; // return jObjectTxManager.executeTx(() -> { -// return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { +// return key.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { // var unexpected = !Objects.equals( // Maps.filterValues(md.getChangelog(), val -> val != 0), // Maps.filterValues(receivedMap, val -> val != 0)); @@ -98,10 +100,10 @@ // return reply.getObject().getContent(); // }); // }); -// }); -// } -// -// @Nullable + }); + } + + // @Nullable // public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { // var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); // @@ -128,7 +130,7 @@ // return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send)); // } // -// public OpPushReply pushOps(List ops, String queueName, UUID host) { + public OpPushReply pushOps(PeerId target, List ops) { // for (Op op : ops) { // for (var ref : op.getEscapedRefs()) { // jObjectTxManager.executeTx(() -> { @@ -141,9 +143,14 @@ // .setQueueId(queueName); // for (var op : ops) // builder.addMsg(opProtoSerializer.serialize(op)); -// return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build())); -// } -// + for (Op op : ops) { + var serialized = opProtoSerializer.serialize(op); + var built = OpPushRequest.newBuilder().addMsg(serialized).build(); + rpcClientFactory.withObjSyncClient(target, (tgt, client) -> client.opPush(built)); + } + return OpPushReply.getDefaultInstance(); + } + // public Collection canDelete(Collection targets, String object, Collection ourReferrers) { // ConcurrentLinkedDeque results = new ConcurrentLinkedDeque<>(); // Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); @@ -171,4 +178,4 @@ // } // return results; // } -//} +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index 990ad534..e4b6b468 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -1,60 +1,77 @@ package com.usatiuk.dhfs.objects.repository; +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.objects.repository.invalidation.OpHandler; +import com.usatiuk.dhfs.objects.transaction.Transaction; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.quarkus.grpc.GrpcService; +import io.quarkus.logging.Log; +import io.quarkus.security.identity.SecurityIdentity; import 
io.smallrye.common.annotation.Blocking; import io.smallrye.mutiny.Uni; import jakarta.annotation.security.RolesAllowed; import jakarta.inject.Inject; +import org.apache.commons.lang3.NotImplementedException; -/// / Note: RunOnVirtualThread hangs somehow +// Note: RunOnVirtualThread hangs somehow @GrpcService @RolesAllowed("cluster-member") public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { // @Inject // SyncHandler syncHandler; -// -// @Inject -// JObjectManager jObjectManager; -// -// @Inject -// PeerManager peerManager; -// -// @Inject -// AutoSyncProcessor autoSyncProcessor; -// -@Inject -PersistentPeerDataService persistentPeerDataService; -// -// @Inject -// InvalidationQueueService invalidationQueueService; -// -// @Inject + + @Inject + TransactionManager txm; + @Inject + PeerManager peerManager; + @Inject + Transaction curTx; + @Inject + PersistentPeerDataService persistentPeerDataService; + + @Inject + InvalidationQueueService invalidationQueueService; + @Inject + SecurityIdentity identity; + // @Inject // ProtoSerializer dataProtoSerializer; -// @Inject -// ProtoSerializer opProtoSerializer; -// -// @Inject -// OpObjectRegistry opObjectRegistry; -// -// @Inject -// JObjectTxManager jObjectTxManager; -// -// @Override -// @Blocking -// public Uni getObject(GetObjectRequest request) { -// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); -// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) -// throw new StatusRuntimeException(Status.UNAUTHENTICATED); -// -// Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid()); -// -// var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); -// + @Inject + ProtoSerializer opProtoSerializer; + @Inject + ProtoSerializer receivedObjectProtoSerializer; + @Inject + RemoteTransaction remoteTx; + @Inject + OpHandler opHandler; + + @Override + @Blocking + public Uni getObject(GetObjectRequest request) { + Log.info("<-- getObject: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); + + var obj = txm.run(() -> { + var got = remoteTx.get(JDataRemote.class, JObjectKey.of(request.getName())).orElse(null); + if (got == null) { + Log.info("<-- getObject NOT FOUND: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); + throw new StatusRuntimeException(Status.NOT_FOUND); + } + return got; + }); + + var serialized = receivedObjectProtoSerializer.serialize(obj.toReceivedObject()); + return Uni.createFrom().item(serialized); // // Does @Blocking break this? // return Uni.createFrom().emitter(emitter -> { -// var replyObj = jObjectTxManager.executeTx(() -> { +// try { +// } catch (Exception e) { +// emitter.fail(e); +// } +// var replyObj = txm.run(() -> { +// var cur = curTx.get(JDataRemote.class, JObjectKey.of(request.getName())).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); // // Obj.markSeen before markSeen of its children // obj.markSeen(); // return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { @@ -77,11 +94,17 @@ PersistentPeerDataService persistentPeerDataService; // var ret = GetObjectReply.newBuilder() // .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) // .setObject(replyObj).build(); +// emitter.complete(ret); // // TODO: Could this cause problems if we wait for too long? 
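The getObject handler above (and opPush below) recovers the calling peer from the mTLS principal via identity.getPrincipal().getName().substring(3). The substring(3) presumably strips a "CN=" prefix from the certificate's distinguished name; that is an assumption inferred from the magic number, not stated anywhere in the patch. A named helper would make the intent explicit:

    // Hypothetical helper; the "CN=" prefix is an assumption, not confirmed by the code.
    private static PeerId peerFromIdentity(SecurityIdentity identity) {
        String principal = identity.getPrincipal().getName(); // e.g. "CN=<peer-uuid>"
        return PeerId.of(principal.substring("CN=".length()));
    }
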
-// obj.commitFenceAsync(() -> emitter.complete(ret)); +//// obj.commitFenceAsync(() -> emitter.complete(ret)); // }); -// } -// + } + + @Override + public Uni canDelete(CanDeleteRequest request) { + throw new NotImplementedException(); + } + // @Override // @Blocking // public Uni canDelete(CanDeleteRequest request) { @@ -107,11 +130,12 @@ PersistentPeerDataService persistentPeerDataService; // return m.isDeletionCandidate() && !m.isDeleted(); // }); // // FIXME -//// if (tryUpdate) { -//// obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { -//// return null; -//// }); -//// } + + /// / if (tryUpdate) { + /// / obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { + /// / return null; + /// / }); + /// / } // } catch (DeletedObjectAccessException dox) { // builder.setDeletionCandidate(true); // } @@ -127,7 +151,7 @@ PersistentPeerDataService persistentPeerDataService; // // return Uni.createFrom().item(ret); // } -// + // @Override // @Blocking // public Uni indexUpdate(IndexUpdatePush request) { @@ -135,51 +159,29 @@ PersistentPeerDataService persistentPeerDataService; // if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) // throw new StatusRuntimeException(Status.UNAUTHENTICATED); // - /// / Log.info("<-- indexUpdate: " + request.getHeader().getName()); +// Log.info("<-- indexUpdate: " + request.getHeader().getName()); // return jObjectTxManager.executeTxAndFlush(() -> { // return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); // }); // } -// -// @Override -// @Blocking -// public Uni opPush(OpPushMsg request) { -// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); -// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) -// throw new StatusRuntimeException(Status.UNAUTHENTICATED); -// -// try { -// var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); -// jObjectTxManager.executeTxAndFlush(() -> { -// opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs); -// }); -// } catch (Exception e) { -// Log.error(e, e); -// throw e; -// } -// return Uni.createFrom().item(OpPushReply.getDefaultInstance()); -// } -// - @Override - public Uni getObject(GetObjectRequest request) { - return null; + @Blocking + public Uni opPush(OpPushRequest request) { + try { + var ops = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); + for (var op : ops) { + Log.info("<-- op: " + op + " from " + identity.getPrincipal().getName().substring(3)); + txm.run(() -> { + opHandler.handleOp(PeerId.of(identity.getPrincipal().getName().substring(3)), op); + }); + } + } catch (Exception e) { + Log.error(e, e); + throw e; + } + return Uni.createFrom().item(OpPushReply.getDefaultInstance()); } - @Override - public Uni canDelete(CanDeleteRequest request) { - return null; - } - - @Override - public Uni indexUpdate(IndexUpdatePush request) { - return null; - } - - @Override - public Uni opPush(OpPushMsg request) { - return null; - } @Override @Blocking diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java index 3dddddf3..ab234a66 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java @@ -76,7 +76,7 @@ public class RpcClientFactory { .withMaxOutboundMessageSize(Integer.MAX_VALUE) .withMaxInboundMessageSize(Integer.MAX_VALUE); }); - return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); + return fn.apply(host, stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); } public void dropCache() { @@ -85,7 +85,7 @@ public class RpcClientFactory { @FunctionalInterface public interface ObjectSyncClientFunction { - R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); + R apply(PeerId peer, DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); } private record ObjSyncStubKey(PeerId id, InetAddress addr, int port) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index f47e34e2..4e7f883f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -1,12 +1,35 @@ -//package com.usatiuk.dhfs.objects.repository; +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.RemoteObject; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.NotImplementedException; +import org.pcollections.PMap; + +import java.util.stream.Collectors; +import java.util.stream.Stream; + // + //import com.usatiuk.autoprotomap.runtime.ProtoSerializer; + //import com.usatiuk.dhfs.objects.jrepository.JObject; + //import com.usatiuk.dhfs.objects.jrepository.JObjectData; + //import com.usatiuk.dhfs.objects.jrepository.JObjectManager; + //import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; + //import com.usatiuk.dhfs.objects.persistence.JObjectDataP; + //import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; + //import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; //import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; //import io.grpc.Status; @@ -23,8 +46,12 @@ //import java.util.stream.Collectors; //import java.util.stream.Stream; // -//@ApplicationScoped -//public class SyncHandler { +@ApplicationScoped +public class SyncHandler { + @Inject + Transaction curTx; + @Inject + PersistentPeerDataService persistentPeerDataService; // @Inject // JObjectManager jObjectManager; // @Inject @@ -65,143 +92,150 @@ // ); // } // -// public void handleOneUpdate(UUID from, ObjectHeader header) { -// AtomicReference> foundExt = new AtomicReference<>(); + + public RemoteObject handleOneUpdate(PeerId from, RemoteObject current, PMap rcvChangelog) { +// if (!rcv.key().equals(current.key())) { +// Log.error("Received update for different object: " + rcv.key() + " from " + from); +// throw new IllegalArgumentException("Received update for different object: " + rcv.key() + " from " + from); +// } + + var receivedTotalVer = rcvChangelog.values().stream().mapToLong(Long::longValue).sum(); + + if (current.meta().knownRemoteVersions().getOrDefault(from, 0L) > receivedTotalVer) { + Log.error("Received older index update than was known for host: " + from + " " + current.key()); 
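The hasLower/hasHigher scan a few lines below treats changelogs as version vectors: compare entry-wise over the union of peer keys, and exactly one of four outcomes follows. A self-contained sketch of the same rule, as a hypothetical standalone helper using plain Map instead of PMap for brevity:

    static String compareChangelogs(Map<PeerId, Long> ours, Map<PeerId, Long> received) {
        boolean hasLower = false, hasHigher = false;
        Set<PeerId> keys = new HashSet<>(ours.keySet());
        keys.addAll(received.keySet());
        for (PeerId k : keys) {
            long o = ours.getOrDefault(k, 0L);
            long r = received.getOrDefault(k, 0L);
            if (r < o) hasLower = true;   // sender is missing some of our writes
            if (r > o) hasHigher = true;  // sender has writes we are missing
        }
        if (hasLower && hasHigher) return "CONFLICT"; // divergent histories, needs resolution
        if (hasHigher) return "THEIRS_NEWER";         // take the received state
        if (hasLower) return "OURS_NEWER";            // stale update, ignore
        return "EQUAL";                               // no action needed
    }
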
+ throw new IllegalStateException(); // FIXME: OutdatedUpdateException + } + + Log.trace("Handling update: " + current.key() + " from " + from + "\n" + "ours: " + current + " \n" + "received: " + rcvChangelog); + + boolean conflict = false; + boolean updatedRemoteVersion = false; + + var newObj = current; + var curKnownRemoteVersion = current.meta().knownRemoteVersions().get(from); + + if (curKnownRemoteVersion == null || !curKnownRemoteVersion.equals(receivedTotalVer)) + updatedRemoteVersion = true; + + if (updatedRemoteVersion) + newObj = current.withMeta(current.meta().withKnownRemoteVersions( + current.meta().knownRemoteVersions().plus(from, receivedTotalVer) + )); + + + boolean hasLower = false; + boolean hasHigher = false; + for (var e : Stream.concat(current.meta().changelog().keySet().stream(), rcvChangelog.keySet().stream()).collect(Collectors.toUnmodifiableSet())) { + if (rcvChangelog.getOrDefault(e, 0L) < current.meta().changelog().getOrDefault(e, 0L)) + hasLower = true; + if (rcvChangelog.getOrDefault(e, 0L) > current.meta().changelog().getOrDefault(e, 0L)) + hasHigher = true; + } + + if (hasLower && hasHigher) { + Log.info("Conflict on update (inconsistent version): " + current.key() + " from " + from); +// Log. // -// boolean conflict = jObjectTxManager.executeTx(() -> { -// JObject found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty()); -// foundExt.set(found); -// -// var receivedTotalVer = header.getChangelog().getEntriesList() -// .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum); -// -// var receivedMap = new HashMap(); -// for (var e : header.getChangelog().getEntriesList()) { -// receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); -// } -// -// return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> { -// if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) { -// Log.error("Received older index update than was known for host: " -// + from + " " + header.getName()); -// throw new OutdatedUpdateException(); -// } -// -// String rcv = ""; -// for (var e : header.getChangelog().getEntriesList()) { -// rcv += e.getHost() + ": " + e.getVersion() + "; "; -// } -// String ours = ""; -// for (var e : md.getChangelog().entrySet()) { -// ours += e.getKey() + ": " + e.getValue() + "; "; -// } -// Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv); -// -// boolean updatedRemoteVersion = false; -// -// var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer); -// if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true; -// -// boolean hasLower = false; -// boolean hasHigher = false; -// for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) { -// if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L)) -// hasLower = true; -// if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L)) -// hasHigher = true; -// } -// -// if (hasLower && hasHigher) { -// Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from); -// return true; -// } -// -// if (hasLower) { -// Log.info("Received older index update than known: " -// + from + " " + header.getName()); -// throw new OutdatedUpdateException(); -// } -// -// if (hasHigher) { -// invalidate.apply(); -// md.getChangelog().clear(); -// 
md.getChangelog().putAll(receivedMap); -// md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L); -// if (header.hasPushedData()) -// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); -// return false; -// } else if (data == null && header.hasPushedData()) { -// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -// if (found.getData() == null) -// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); -// } -// -// assert Objects.equals(receivedTotalVer, md.getOurVersion()); -// -// if (!updatedRemoteVersion) -// Log.debug("No action on update: " + header.getName() + " from " + from); -// -// return false; -// }); -// }); -// -// // TODO: Is the lock gap here ok? -// if (conflict) { -// Log.info("Trying conflict resolution: " + header.getName() + " from " + from); +// info("Trying conflict resolution: " + header.getName() + " from " + from); // var found = foundExt.get(); // // JObjectData theirsData; // ObjectHeader theirsHeader; -// if (header.hasPushedData()) { +// if (header. hasPushedData()) { // theirsHeader = header; -// theirsData = dataProtoSerializer.deserialize(header.getPushedData()); +// theirsData = dataProtoSerializer. +// +// deserialize(header.getPushedData()); // } else { // var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); -// theirsData = dataProtoSerializer.deserialize(got.getRight()); -// theirsHeader = got.getLeft(); +// theirsData = dataProtoSerializer. +// +// deserialize(got.getRight()); +// theirsHeader = got. +// +// getLeft(); // } // -// jObjectTxManager.executeTx(() -> { -// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { -// if (d == null) -// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); -// return d.getConflictResolver(); -// }); -// var resolver = conflictResolvers.select(resolverClass); -// resolver.get().resolve(from, theirsHeader, theirsData, found); -// }); -// Log.info("Resolved conflict for " + from + " " + header.getName()); +// jObjectTxManager. +// +// executeTx(() -> { +// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +// if (d == null) +// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); +// return d.getConflictResolver(); +// }); +// var resolver = conflictResolvers.select(resolverClass); +// resolver. +// +// get(). +// +// resolve(from, theirsHeader, theirsData, found); +// }); +// Log. info("Resolved conflict for " + from + " " + header.getName()); + throw new NotImplementedException(); + } else if (hasLower) { + Log.info("Received older index update than known: " + from + " " + current.key()); +// throw new OutdatedUpdateException(); + throw new NotImplementedException(); + } else if (hasHigher) { + var newChangelog = rcvChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
+ rcvChangelog : rcvChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); + + newObj = newObj.withData(null).withMeta(newObj.meta().withChangelog(newChangelog)); +// if (header.hasPushedData()) +// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); + } +// else if (data == null && header.hasPushedData()) { +// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +// if (found.getData() == null) +// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); // } -// -// } -// -// public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) { -// // TODO: Dedup -// try { -// handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader()); + +// assert Objects.equals(receivedTotalVer, md.getOurVersion()); + + if (!updatedRemoteVersion) + Log.debug("No action on update: " + current.meta().key() + " from " + from); + + return newObj; + } + + public RemoteObject handleRemoteUpdate(PeerId from, JObjectKey key, RemoteObject current, PMap rcv) { + // TODO: Dedup + try { + if (current == null) { + var obj = new RemoteObject<>(key, rcv); + curTx.put(obj); + return (RemoteObject) obj; + } + + var newObj = handleOneUpdate(from, current, rcv); + if (newObj != current) { + curTx.put(newObj); + } + return newObj; // } catch (OutdatedUpdateException ignored) { // Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid()); // invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName()); -// } catch (Exception ex) { -// Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex); -// throw ex; -// } -// + } catch (Exception ex) { + Log.info("Error when handling update from " + from + " of " + current.meta().key(), ex); + throw ex; + } + // return IndexUpdateReply.getDefaultInstance(); -// } -// -// protected static class OutdatedUpdateException extends RuntimeException { -// OutdatedUpdateException() { -// super(); -// } -// -// OutdatedUpdateException(String message) { -// super(message); -// } -// -// @Override -// public synchronized Throwable fillInStackTrace() { -// return this; -// } -// } -//} \ No newline at end of file + } + + protected static class OutdatedUpdateException extends RuntimeException { + OutdatedUpdateException() { + super(); + } + + OutdatedUpdateException(String message) { + super(message); + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java new file mode 100644 index 00000000..1df4136a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java @@ -0,0 +1,17 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import org.apache.commons.collections4.MultiValuedMap; +import org.apache.commons.collections4.multimap.HashSetValuedHashMap; + +import java.io.Serial; +import java.io.Serializable; +import java.util.UUID; + +public class DeferredInvalidationQueueData implements Serializable { + @Serial + private static final long serialVersionUID = 1L; + + public 
final MultiValuedMap deferredInvalidations = new HashSetValuedHashMap<>(); +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java new file mode 100644 index 00000000..8d2a30b9 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java @@ -0,0 +1,85 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.PeerManager; +import com.usatiuk.dhfs.utils.SerializationHelper; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import io.quarkus.scheduler.Scheduled; +import io.smallrye.common.annotation.Blocking; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.SerializationUtils; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.UUID; + +@ApplicationScoped +public class DeferredInvalidationQueueService { + private static final String dataFileName = "invqueue"; + @Inject + PeerManager remoteHostManager; + @Inject + InvalidationQueueService invalidationQueueService; + @ConfigProperty(name = "dhfs.objects.persistence.files.root") + String dataRoot; + private DeferredInvalidationQueueData _persistentData = new DeferredInvalidationQueueData(); + + void init(@Observes @Priority(290) StartupEvent event) throws IOException { + Paths.get(dataRoot).toFile().mkdirs(); + Log.info("Initializing with root " + dataRoot); + if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { + Log.info("Reading invalidation queue"); + _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); + } +// remoteHostManager.registerConnectEventListener(this::returnForHost); + } + + void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException { + Log.info("Saving deferred invalidations"); + writeData(); + Log.info("Saved deferred invalidations"); + } + + private void writeData() { + try { + Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData)); + } catch (IOException iex) { + Log.error("Error writing deferred invalidations data", iex); + throw new RuntimeException(iex); + } + } + + // FIXME: + @Scheduled(every = "15s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) + @Blocking + void periodicReturn() { + for (var reachable : remoteHostManager.getAvailableHosts()) + returnForHost(reachable); + } + + void returnForHost(PeerId host) { + synchronized (this) { + var col = _persistentData.deferredInvalidations.get(host); + for (var s : col) { + Log.trace("Un-deferred invalidation to " + host + " of " + s); + invalidationQueueService.pushDeferredInvalidations(host, s); + } + col.clear(); + } + } + + void defer(PeerId host, JObjectKey object) { + synchronized (this) { + Log.trace("Deferred invalidation to " + host + " of " + object); + _persistentData.deferredInvalidations.put(host, object); + } + } +} diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java new file mode 100644 index 00000000..e2162139 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java @@ -0,0 +1,12 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.RemoteObject; +import org.pcollections.PMap; + +public record IndexUpdateOp(JObjectKey key, PMap changelog) implements Op { + public IndexUpdateOp(RemoteObject object) { + this(object.key(), object.meta().changelog()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java new file mode 100644 index 00000000..bb614c1a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java @@ -0,0 +1,36 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.repository.IndexUpdateOpP; +import jakarta.enterprise.context.ApplicationScoped; +import org.pcollections.HashTreePMap; +import org.pcollections.PMap; + +@ApplicationScoped +public class IndexUpdateOpSerializer implements ProtoSerializer { + + @Override + public IndexUpdateOp deserialize(IndexUpdateOpP message) { + PMap map = HashTreePMap.empty(); + for (var entry : message.getHeader().getChangelog().getEntriesList()) { + map = map.plus(PeerId.of(entry.getHost()), entry.getVersion()); + } + return new IndexUpdateOp(JObjectKey.of(message.getHeader().getName()), map); + } + + @Override + public IndexUpdateOpP serialize(IndexUpdateOp object) { + var builder = IndexUpdateOpP.newBuilder(); + var headerBuilder = builder.getHeaderBuilder(); + headerBuilder.setName(object.key().name()); + var changelogBuilder = headerBuilder.getChangelogBuilder(); + for (var entry : object.changelog().entrySet()) { + var entryBuilder = changelogBuilder.addEntriesBuilder(); + entryBuilder.setHost(entry.getKey().id().toString()); + entryBuilder.setVersion(entry.getValue()); + } + return builder.build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java new file mode 100644 index 00000000..b2c3c024 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java @@ -0,0 +1,190 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.TransactionManager; +import com.usatiuk.dhfs.objects.repository.PeerManager; +import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import 
com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import io.vertx.core.impl.ConcurrentHashSet; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +@ApplicationScoped +public class InvalidationQueueService { + private final HashSetDelayedBlockingQueue> _queue; + private final AtomicReference> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>()); + @Inject + PeerManager remoteHostManager; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + TransactionManager txm; + @Inject + Transaction curTx; + @Inject + PersistentPeerDataService persistentPeerDataService; + @Inject + DeferredInvalidationQueueService deferredInvalidationQueueService; + @Inject + PeerInfoService peerInfoService; + @Inject + OpPusher opPusher; + @ConfigProperty(name = "dhfs.objects.invalidation.threads") + int threads; + private ExecutorService _executor; + private volatile boolean _shutdown = false; + + public InvalidationQueueService(@ConfigProperty(name = "dhfs.objects.invalidation.delay") int delay) { + _queue = new HashSetDelayedBlockingQueue<>(delay); + } + + void init(@Observes @Priority(300) StartupEvent event) throws InterruptedException { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("invalidation-%d") + .build(); + + _executor = Executors.newFixedThreadPool(threads, factory); + + for (int i = 0; i < threads; i++) { + _executor.submit(this::sender); + } + } + + void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { + _shutdown = true; + _executor.shutdownNow(); + if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) { + Log.error("Failed to shut down invalidation sender thread"); + } + var data = _queue.close(); + Log.info("Will defer " + data.size() + " invalidations on shutdown"); + for (var e : data) + deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + } + + private void sender() { + while (!_shutdown) { + try { + try { + if (!_queue.hasImmediate()) { + ConcurrentHashSet toAllQueue; + + while (true) { + toAllQueue = _toAllQueue.get(); + if (toAllQueue != null) { + if (_toAllQueue.compareAndSet(toAllQueue, null)) + break; + } else { + break; + } + } + + if (toAllQueue != null) { + var hostInfo = remoteHostManager.getHostStateSnapshot(); + for (var o : toAllQueue) { + for (var h : hostInfo.available()) + _queue.add(Pair.of(h, o)); + for (var u : hostInfo.unavailable()) + deferredInvalidationQueueService.defer(u, o); + } + } + } + + var data = _queue.getAllWait(100, _queue.getDelay()); // TODO: config? 
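The sender loop drains a HashSetDelayedBlockingQueue, which, judging by its use here and in the shutdown path, deduplicates equal entries and only releases them after the configured delay. A usage sketch of the semantics this loop assumes (inferred from the call sites, not the queue's documented contract):

    var queue = new HashSetDelayedBlockingQueue<Pair<PeerId, JObjectKey>>(1000); // 1s delay
    queue.add(Pair.of(peer, JObjectKey.of("obj1")));
    queue.add(Pair.of(peer, JObjectKey.of("obj1")));     // same element: deduplicated
    var batch = queue.getAllWait(100, queue.getDelay()); // block, then take up to 100 (assumed batch limit)
    assert batch.size() == 1;
    var leftover = queue.close();                        // drain what remains on shutdown
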
+ if (data.isEmpty()) continue; + String stats = "Sent invalidation: "; + long success = 0; + + for (var e : data) { + if (peerInfoService.getPeerInfo(e.getLeft()).isEmpty()) continue; + + if (!remoteHostManager.isReachable(e.getLeft())) { + deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + continue; + } + + try { + opPusher.doPush(e.getLeft(), e.getRight()); + success++; + } catch (Exception ex) { + Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex); + pushInvalidationToOne(e.getLeft(), e.getRight()); + } + if (_shutdown) { + Log.info("Invalidation sender exiting"); + break; + } + } + + stats += success + "/" + data.size() + " "; + Log.info(stats); + } catch (InterruptedException ie) { + throw ie; + } catch (Exception e) { + Log.error("Exception in invalidation sender thread: ", e); + } + } catch (InterruptedException ignored) { + } + } + Log.info("Invalidation sender exiting"); + } + + public void pushInvalidationToAll(JObjectKey key) { +// if (obj.getMeta().isOnlyLocal()) return; + while (true) { + var queue = _toAllQueue.get(); + if (queue == null) { + var nq = new ConcurrentHashSet(); + if (!_toAllQueue.compareAndSet(null, nq)) continue; + queue = nq; + } + + queue.add(key); + + if (_toAllQueue.get() == queue) break; + } + } + + public void pushInvalidationToOne(PeerId host, JObjectKey obj) { +// if (obj.getMeta().isOnlyLocal()) return; + if (remoteHostManager.isReachable(host)) + _queue.add(Pair.of(host, obj)); + else + deferredInvalidationQueueService.defer(host, obj); + } + + public void pushInvalidationToOne(PeerId host, JData obj) { +// if (obj.getMeta().isOnlyLocal()) return; + pushInvalidationToOne(host, obj.key()); + } + +// public void pushInvalidationToAll(String name) { +// pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); +// } +// +// public void pushInvalidationToOne(PeerId host, JObjectKey name) { +// pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); +// } + + protected void pushDeferredInvalidations(PeerId host, JObjectKey name) { + _queue.add(Pair.of(host, name)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java new file mode 100644 index 00000000..13399feb --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java @@ -0,0 +1,22 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; +import com.usatiuk.dhfs.objects.repository.JKleppmannTreeOpPTemp; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class JKleppmannTreeOpPTempSerializer implements ProtoSerializer { + @Override + public JKleppmannTreeOpWrapper deserialize(JKleppmannTreeOpPTemp message) { + return SerializationHelper.deserialize(message.getSerialized().toByteArray()); + } + + @Override + public JKleppmannTreeOpPTemp serialize(JKleppmannTreeOpWrapper object) { + return JKleppmannTreeOpPTemp.newBuilder() + .setSerialized(SerializationHelper.serialize(object)) + .build(); + } +} diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java new file mode 100644 index 00000000..eb5c6029 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.repository.OpPushPayload; + +@ProtoMirror(OpPushPayload.class) +public interface Op { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java new file mode 100644 index 00000000..6b78661a --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpHandler.java @@ -0,0 +1,27 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class OpHandler { + @Inject + PushOpHandler pushOpHandler; + @Inject + Transaction curTx; + @Inject + JKleppmannTreeManager jKleppmannTreeManager; + + public void handleOp(PeerId from, Op op) { + if (op instanceof IndexUpdateOp iu) { + pushOpHandler.handlePush(from, iu); + } else if (op instanceof JKleppmannTreeOpWrapper jk) { + var tree = jKleppmannTreeManager.getTree(jk.treeName()); + tree.acceptExternalOp(from, jk); + } + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java new file mode 100644 index 00000000..f653c265 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -0,0 +1,52 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.List; + +@ApplicationScoped +public class OpPusher { + @Inject + Transaction curTx; + @Inject + TransactionManager txm; + @Inject + RemoteTransaction remoteTransaction; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + @Inject + InvalidationQueueService invalidationQueueService; + + public void doPush(PeerId op, JObjectKey key) { + Op info = txm.run(() -> { + var obj = curTx.get(JData.class, key).orElse(null); + switch (obj) { + case RemoteObject remote -> { + return new IndexUpdateOp(key, remote.meta().changelog()); + } + case JKleppmannTreePersistentData pd -> { + var ret = new JKleppmannTreeOpWrapper(key, pd.queues().get(op).firstEntry().getValue()); + var newPd = pd.withQueues(pd.queues().plus(op, pd.queues().get(op).minus(ret.op().timestamp()))); + curTx.put(newPd); + if (!newPd.queues().get(op).isEmpty()) + 
invalidationQueueService.pushInvalidationToOne(op, pd.key()); + return ret; + } + case null, + default -> { + return null; + } + } + }); + if (info == null) { + return; + } + remoteObjectServiceClient.pushOps(op, List.of(info)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java new file mode 100644 index 00000000..8ee79b77 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.RemoteTransaction; +import com.usatiuk.dhfs.objects.repository.SyncHandler; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +@ApplicationScoped +public class PushOpHandler { + @Inject + Transaction curTx; + @Inject + SyncHandler syncHandler; + @Inject + RemoteTransaction remoteTransaction; + + public void handlePush(PeerId peer, IndexUpdateOp obj) { + syncHandler.handleRemoteUpdate(peer, obj.key(), + remoteTransaction.get(JDataRemote.class, obj.key()).orElse(null), + obj.changelog()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java index caa45d3a..cc9d4586 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -1,30 +1,19 @@ package com.usatiuk.dhfs.objects.repository.peersync; -import com.usatiuk.dhfs.objects.JDataRefcounted; +import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.persistence.ChunkDataP; +import com.usatiuk.dhfs.objects.persistence.PeerInfoP; import com.usatiuk.dhfs.objects.repository.CertificateTools; -import org.pcollections.HashTreePSet; -import org.pcollections.PCollection; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; -public record PeerInfo(JObjectKey key, PCollection refsFrom, boolean frozen, PeerId id, - byte[] cert) implements JDataRefcounted, JDataRemote { +public record PeerInfo(JObjectKey key, PeerId id, byte[] cert) implements JDataRemote { public PeerInfo(PeerId id, byte[] cert) { - this(id.toJObjectKey(), HashTreePSet.empty(), false, id, cert); - } - - @Override - public JDataRefcounted withRefsFrom(PCollection refs) { - return new PeerInfo(key, refs, frozen, id, cert); - } - - @Override - public JDataRefcounted withFrozen(boolean frozen) { - return new PeerInfo(key, refsFrom, frozen, id, cert); + this(id.toJObjectKey(), id, cert); } public X509Certificate parsedCert() { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java new file mode 100644 index 00000000..5f00c155 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java @@ 
-0,0 +1,24 @@ +package com.usatiuk.dhfs.objects.repository.peersync; + +import com.google.protobuf.ByteString; +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.persistence.PeerInfoP; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class PeerInfoSerializer implements ProtoSerializer { + + @Override + public PeerInfo deserialize(PeerInfoP message) { + return new PeerInfo(PeerId.of(message.getUuid()), message.getCert().toByteArray()); + } + + @Override + public PeerInfoP serialize(PeerInfo object) { + return PeerInfoP.newBuilder() + .setUuid(object.key().toString()) + .setCert(ByteString.copyFrom(object.cert())) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java index c83fe311..be8d8a2a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.repository.peersync; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.RemoteTransaction; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; @@ -24,6 +25,8 @@ public class PeerInfoService { JKleppmannTreeManager jKleppmannTreeManager; @Inject PersistentPeerDataService persistentPeerDataService; + @Inject + RemoteTransaction remoteTx; private JKleppmannTreeManager.JKleppmannTree getTree() { return jKleppmannTreeManager.getTree(JObjectKey.of("peers")); @@ -37,7 +40,7 @@ public class PeerInfoService { } return curTx.get(JKleppmannTreeNode.class, gotKey).flatMap(node -> { var meta = (JKleppmannTreeNodeMetaPeer) node.meta(); - return curTx.get(PeerInfo.class, meta.getPeerId()); + return remoteTx.getData(PeerInfo.class, meta.getPeerId()); }); }); } @@ -69,7 +72,7 @@ public class PeerInfoService { jObjectTxManager.run(() -> { var parent = getTree().traverse(List.of()); var newPeerInfo = new PeerInfo(id, cert); - curTx.put(newPeerInfo); + remoteTx.put(newPeerInfo); getTree().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), getTree().getNewNodeId()); }); } diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto index 0f93fdd5..3acb21a3 100644 --- a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto +++ b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto @@ -33,10 +33,13 @@ message FilePChunksEntry { } message FileP { - FsNodeP fsNode = 1; - repeated FilePChunksEntry chunks = 2; - bool symlink = 3; - int64 size = 4; + string uuid = 1; + int64 mode = 2; + int64 ctime = 3; + int64 mtime = 4; + repeated FilePChunksEntry chunks = 5; + bool symlink = 6; + int64 size = 7; } message DirectoryP { @@ -53,7 +56,7 @@ message PeerDirectoryP { repeated string peers = 1; } -message PersistentPeerInfoP { +message PeerInfoP { string uuid = 1; bytes cert = 2; } @@ -141,15 +144,26 @@ message PeerDirectoryLocalP { repeated string initialObjSyncDonePeers = 2; } +message RemoteObjectP { + oneof obj { + FileP file = 2; + ChunkDataP chunkData = 5; + // PeerDirectoryP 
peerDirectory = 6; + PeerInfoP peerInfo = 7; + // JKleppmannTreeNodeP jKleppmannTreeNode = 8; + // JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; + // PeerDirectoryLocalP peerDirectoryLocal = 10; + } +} + message JObjectDataP { oneof obj { FileP file = 2; - DirectoryP directory = 3; ChunkDataP chunkData = 5; - PeerDirectoryP peerDirectory = 6; - PersistentPeerInfoP persistentPeerInfo = 7; - JKleppmannTreeNodeP jKleppmannTreeNode = 8; - JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; - PeerDirectoryLocalP peerDirectoryLocal = 10; +// PeerDirectoryP peerDirectory = 6; + PeerInfoP peerInfo = 7; +// JKleppmannTreeNodeP jKleppmannTreeNode = 8; +// JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; +// PeerDirectoryLocalP peerDirectoryLocal = 10; } } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto index 8ef94946..5820aefb 100644 --- a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto +++ b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto @@ -11,8 +11,7 @@ package dhfs.objects.sync; service DhfsObjectSyncGrpc { rpc GetObject (GetObjectRequest) returns (GetObjectReply) {} rpc CanDelete (CanDeleteRequest) returns (CanDeleteReply) {} - rpc IndexUpdate (IndexUpdatePush) returns (IndexUpdateReply) {} - rpc OpPush (OpPushMsg) returns (OpPushReply) {} + rpc OpPush (OpPushRequest) returns (OpPushReply) {} rpc Ping (PingRequest) returns (PingReply) {} } @@ -37,63 +36,53 @@ message ObjectChangelog { message ObjectHeader { string name = 2; ObjectChangelog changelog = 5; - optional dhfs.objects.persistence.JObjectDataP pushedData = 6; -} - -message ApiObject { - ObjectHeader header = 1; - dhfs.objects.persistence.JObjectDataP content = 2; + optional dhfs.objects.persistence.RemoteObjectP pushedData = 6; } message GetObjectRequest { - string selfUuid = 10; - string name = 2; } message GetObjectReply { - string selfUuid = 10; - - ApiObject object = 1; + ObjectHeader header = 1; + dhfs.objects.persistence.RemoteObjectP content = 2; } message CanDeleteRequest { - string selfUuid = 10; - string name = 2; repeated string ourReferrers = 3; } message CanDeleteReply { - string selfUuid = 10; string objName = 1; bool deletionCandidate = 2; repeated string referrers = 3; } -message IndexUpdatePush { - string selfUuid = 10; - +message IndexUpdateOpP { ObjectHeader header = 1; } message IndexUpdateReply {} message JKleppmannTreePeriodicPushOpP { - string fromUuid = 1; int64 timestamp = 2; } +message JKleppmannTreeOpPTemp { + bytes serialized = 2; +} + message OpPushPayload { oneof payload { - dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; - JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; + JKleppmannTreeOpPTemp jKleppmannTreeOpWrapper = 1; + // dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; + // JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; + IndexUpdateOpP indexUpdateOp = 3; } } -message OpPushMsg { - string selfUuid = 10; - string queueId = 1; +message OpPushRequest { repeated OpPushPayload msg = 2; } diff --git a/dhfs-parent/server/src/main/resources/application.properties b/dhfs-parent/server/src/main/resources/application.properties index bbf3bab4..220ba49b 100644 --- a/dhfs-parent/server/src/main/resources/application.properties +++ b/dhfs-parent/server/src/main/resources/application.properties @@ -4,7 +4,7 @@ dhfs.objects.peerdiscovery.interval=5s 
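The reworked sync surface above funnels all pushes through a single OpPush RPC and drops the per-message selfUuid fields, so the sender's identity now has to come from the connection itself. As a rough illustration only — assuming the usual protoc-generated Java builders for the messages defined above, with JKleppmannTreeOpPTemp carrying the opaque serialized tree op for now — a client-side push could be assembled like this:

    OpPushRequest buildTreeOpPush(ByteString serializedOp) {
        return OpPushRequest.newBuilder()
                .addMsg(OpPushPayload.newBuilder()
                        .setJKleppmannTreeOpWrapper(
                                JKleppmannTreeOpPTemp.newBuilder()
                                        .setSerialized(serializedOp)))
                .build();
    }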
dhfs.objects.peerdiscovery.broadcast=true dhfs.objects.sync.timeout=30 dhfs.objects.sync.ping.timeout=5 -dhfs.objects.invalidation.threads=4 +dhfs.objects.invalidation.threads=1 dhfs.objects.invalidation.delay=1000 dhfs.objects.reconnect_interval=5s dhfs.objects.write_log=false diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java index 381fca46..6fa33055 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -4,6 +4,7 @@ import com.usatiuk.dhfs.TempDataProfile; import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; import com.usatiuk.dhfs.files.service.DhfsFileService; +import com.usatiuk.dhfs.objects.RemoteTransaction; import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.AlreadyExistsException; @@ -45,6 +46,8 @@ public class DhfsFileServiceSimpleTestImpl { Transaction curTx; @Inject TransactionManager jObjectTxManager; + @Inject + RemoteTransaction remoteTx; // @Test // void readTest() { @@ -207,9 +210,9 @@ public class DhfsFileServiceSimpleTestImpl { jObjectTxManager.run(() -> { - var oldfile = curTx.get(File.class, ret2.get()).orElseThrow(IllegalStateException::new); + var oldfile = remoteTx.getData(File.class, ret2.get()).orElseThrow(IllegalStateException::new); var chunk = oldfile.chunks().get(0L); - var chunkObj = curTx.get(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); + var chunkObj = remoteTx.getData(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); }); Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); From 9bd4d1914766307d741d4b85b2c123711962c9d6 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 3 Feb 2025 22:43:58 +0100 Subject: [PATCH 055/105] barely working file sync --- .../com/usatiuk/dhfs/objects/JObjectKey.java | 5 ++ .../dhfs/objects/ConflictResolver.java | 5 ++ .../usatiuk/dhfs/objects/DeleterTxHook.java | 9 ++++ .../dhfs/objects/RemoteTransaction.java | 46 +++++++++++-------- .../dhfs/objects/repository/SyncHandler.java | 7 ++- 5 files changed, 48 insertions(+), 24 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 2da79825..67e368ac 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -11,4 +11,9 @@ public record JObjectKey(String name) implements Serializable, Comparable ours, RemoteObject theirs); +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index d91911be..5fae3165 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -16,6 +16,9 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onChange(JObjectKey key, JData old, JData cur) { + if (cur instanceof RemoteObject) { + 
return; // FIXME: + } if (!(cur instanceof JDataRefcounted refCur)) { return; } @@ -28,6 +31,9 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onCreate(JObjectKey key, JData cur) { + if (cur instanceof RemoteObject) { + return; + } if (!(cur instanceof JDataRefcounted refCur)) { return; } @@ -40,6 +46,9 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onDelete(JObjectKey key, JData cur) { + if (cur instanceof RemoteObject) { + return; + } if (!(cur instanceof JDataRefcounted refCur)) { return; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java index e7187193..81e53222 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -5,6 +5,7 @@ import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.SyncHandler; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.mutable.MutableObject; @@ -26,39 +27,45 @@ public class RemoteTransaction { return curTx.getId(); } - private RemoteObject tryDownloadRemote(RemoteObject obj) { + private Optional> tryDownloadRemote(RemoteObject obj) { MutableObject> success = new MutableObject<>(null); - remoteObjectServiceClient.getObject(obj.key(), rcv -> { - if (!obj.meta().knownType().isInstance(rcv.getRight().data())) - throw new IllegalStateException("Object type mismatch: " + obj.meta().knownType() + " vs " + rcv.getRight().data().getClass()); + try { + remoteObjectServiceClient.getObject(obj.key(), rcv -> { + if (!obj.meta().knownType().isInstance(rcv.getRight().data())) + throw new IllegalStateException("Object type mismatch: " + obj.meta().knownType() + " vs " + rcv.getRight().data().getClass()); - if (!rcv.getRight().changelog().equals(obj.meta().changelog())) { - var updated = syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), obj, rcv.getRight().changelog()); - if (!rcv.getRight().changelog().equals(updated.meta().changelog())) - throw new IllegalStateException("Changelog mismatch, update failed?: " + rcv.getRight().changelog() + " vs " + updated.meta().changelog()); - success.setValue(updated.withData((T) rcv.getRight().data())); - } else { - success.setValue(obj.withData((T) rcv.getRight().data())); - } - return true; - }); + if (!rcv.getRight().changelog().equals(obj.meta().changelog())) { + var updated = syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), obj, rcv.getRight().changelog()); + if (!rcv.getRight().changelog().equals(updated.meta().changelog())) + throw new IllegalStateException("Changelog mismatch, update failed?: " + rcv.getRight().changelog() + " vs " + updated.meta().changelog()); + success.setValue(updated.withData((T) rcv.getRight().data())); + } else { + success.setValue(obj.withData((T) rcv.getRight().data())); + } + return true; + }); + } catch (Exception e) { + Log.error("Failed to download object " + obj.key(), e); + return Optional.empty(); + } curTx.put(success.getValue()); - return success.getValue(); + return Optional.of(success.getValue()); } @SuppressWarnings("unchecked") public Optional> get(Class type, JObjectKey key, LockingStrategy 
strategy) { return curTx.get(RemoteObject.class, key, strategy) - .map(obj -> { + .flatMap(obj -> { if (obj.data() != null && !type.isInstance(obj.data())) throw new IllegalStateException("Object (real) type mismatch: " + obj.data().getClass() + " vs " + type); - if (!type.isAssignableFrom(obj.meta().knownType())) - throw new IllegalStateException("Object (meta) type mismatch: " + obj.meta().knownType() + " vs " + type); +// FIXME: +// if (!type.isAssignableFrom(obj.meta().knownType())) +// throw new IllegalStateException("Object (meta) type mismatch: " + obj.meta().knownType() + " vs " + type); if (obj.data() != null) - return obj; + return Optional.of(obj); else return tryDownloadRemote(obj); }); @@ -72,7 +79,6 @@ public class RemoteTransaction { return get(type, key, strategy).map(RemoteObject::data); } - public void put(RemoteObject obj) { curTx.put(obj); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index 4e7f883f..d8f9516f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -8,7 +8,6 @@ import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; import org.pcollections.PMap; import java.util.stream.Collectors; @@ -172,11 +171,11 @@ public class SyncHandler { // resolve(from, theirsHeader, theirsData, found); // }); // Log. info("Resolved conflict for " + from + " " + header.getName()); - throw new NotImplementedException(); +// throw new NotImplementedException(); } else if (hasLower) { Log.info("Received older index update than known: " + from + " " + current.key()); // throw new OutdatedUpdateException(); - throw new NotImplementedException(); +// throw new NotImplementedException(); } else if (hasHigher) { var newChangelog = rcvChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
rcvChangelog : rcvChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); @@ -205,7 +204,7 @@ public class SyncHandler { if (current == null) { var obj = new RemoteObject<>(key, rcv); curTx.put(obj); - return (RemoteObject) obj; + current = (RemoteObject) obj; // Will update known remote version too } var newObj = handleOneUpdate(from, current, rcv); From d5714629ba86c62a39875cbc780694db45832153 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Mon, 3 Feb 2025 23:07:17 +0100 Subject: [PATCH 056/105] fix exceptions in hooks hanging --- .../com/usatiuk/dhfs/objects/JObjectManager.java | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 57b6fd39..c94abdb5 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -128,7 +128,7 @@ public class JObjectManager { // TODO: check deletions, inserts try { Collection> drained; - { + try { boolean somethingChanged; do { somethingChanged = false; @@ -163,12 +163,13 @@ public class JObjectManager { } current.putAll(currentIteration); } while (somethingChanged); - } - reads = tx.reads(); - for (var read : reads.entrySet()) { - addDependency.accept(read.getKey()); - if (read.getValue() instanceof TransactionObjectLocked locked) { - toUnlock.add(locked.lock); + } finally { + reads = tx.reads(); + for (var read : reads.entrySet()) { + addDependency.accept(read.getKey()); + if (read.getValue() instanceof TransactionObjectLocked locked) { + toUnlock.add(locked.lock); + } } } From 7ad26dc0ef26a6378c0803e84457c40200a4c65f Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 14 Feb 2025 15:40:34 +0100 Subject: [PATCH 057/105] do not forget the transaction id --- .../com/usatiuk/dhfs/objects/JDataDummy.java | 28 ++++ .../usatiuk/dhfs/objects/JObjectManager.java | 91 +++++++----- .../dhfs/objects/TransactionManager.java | 32 ++++- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 44 +++--- .../CachingObjectPersistentStore.java | 10 +- .../FileObjectPersistentStore.java | 129 ++++++++---------- .../MemoryObjectPersistentStore.java | 14 +- .../persistence/ObjectPersistentStore.java | 4 +- .../SerializingObjectPersistentStore.java | 13 +- .../dhfs/objects/persistence/TxManifest.java | 10 -- .../objects/persistence/TxManifestObj.java | 12 ++ .../objects/persistence/TxManifestRaw.java | 13 ++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 2 + .../objects/repository/CertificateTools.java | 58 ++++---- .../repository/PersistentPeerDataService.java | 5 +- .../objects/repository/peersync/PeerInfo.java | 10 +- 16 files changed, 265 insertions(+), 210 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java new file mode 100644 index 00000000..cbc3dc29 --- /dev/null +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java @@ -0,0 +1,28 @@ +package com.usatiuk.dhfs.objects; + +public class JDataDummy implements JData { + public static final JObjectKey TX_ID_OBJ_NAME = JObjectKey.of("tx_id"); + private static final JDataDummy INSTANCE = new JDataDummy(); + + public static JDataDummy getInstance() { + return INSTANCE; + } + + @Override + public JObjectKey key() { + return TX_ID_OBJ_NAME; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return true; + } + + // hashCode + @Override + public int hashCode() { + return 0; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index c94abdb5..d72cc71d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -4,7 +4,10 @@ import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import com.usatiuk.dhfs.utils.DataLocker; import io.quarkus.logging.Log; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; @@ -26,16 +29,30 @@ public class JObjectManager { private final DataLocker _objLocker = new DataLocker(); private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); private final AtomicLong _txCounter = new AtomicLong(); + private boolean _ready = false; @Inject WritebackObjectPersistentStore writebackObjectPersistentStore; @Inject TransactionFactory transactionFactory; + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Wrong service order!"); + } + + void init(@Observes @Priority(200) StartupEvent event) { + var read = writebackObjectPersistentStore.readObject(JDataDummy.TX_ID_OBJ_NAME).orElse(null); + if (read != null) { + _txCounter.set(read.version()); + } + _ready = true; + } + JObjectManager(Instance preCommitTxHooks) { _preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); } private JDataVersionedWrapper get(Class type, JObjectKey key) { + verifyReady(); while (true) { { var got = _objects.get(key); @@ -73,24 +90,30 @@ public class JObjectManager { } private TransactionObjectNoLock getObj(Class type, JObjectKey key) { + verifyReady(); var got = get(type, key); return new TransactionObjectNoLock<>(Optional.ofNullable(got)); } private TransactionObjectLocked getObjLock(Class type, JObjectKey key) { + verifyReady(); var lock = _objLocker.lock(key); var got = get(type, key); return new TransactionObjectLocked<>(Optional.ofNullable(got), lock); } public TransactionPrivate createTransaction() { + verifyReady(); var counter = _txCounter.getAndIncrement(); Log.trace("Creating transaction " + counter); return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); } public void commit(TransactionPrivate tx) { + verifyReady(); Log.trace("Committing transaction " + tx.getId()); + // FIXME: Better way? 
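+        // Re-putting the dummy on every commit keeps the transaction counter
+        // durable: the stored version of JDataDummy.TX_ID_OBJ_NAME is always
+        // the id of the last committed transaction, and init() above reads it
+        // back to seed _txCounter after a restart.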
+ tx.put(JDataDummy.getInstance()); var current = new LinkedHashMap>(); var dependenciesLocked = new LinkedHashMap>(); @@ -101,7 +124,7 @@ public class JObjectManager { key -> { dependenciesLocked.computeIfAbsent(key, k -> { var got = getObjLock(JData.class, k); - Log.trace("Adding dependency " + k.toString() + " -> " + got); + Log.trace("Adding dependency " + k.toString() + " -> " + got.data().map(JDataVersionedWrapper::data).map(JData::key).orElse(null)); toUnlock.add(got.lock); return got; }); @@ -127,7 +150,6 @@ public class JObjectManager { // Check that their version is not higher than the version of transaction being committed // TODO: check deletions, inserts try { - Collection> drained; try { boolean somethingChanged; do { @@ -143,8 +165,12 @@ public class JObjectManager { .forEach(addDependency); for (var entry : currentIteration.entrySet()) { + // FIXME: Kinda hack? + if (entry.getKey().equals(JDataDummy.TX_ID_OBJ_NAME)) { + continue; + } somethingChanged = true; - Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.toString()); + Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey()); var oldObj = getCurrent.apply(entry.getKey()); switch (entry.getValue()) { case TxRecord.TxObjectRecordWrite write -> { @@ -173,37 +199,39 @@ public class JObjectManager { } } - for (var dep : dependenciesLocked.entrySet()) { - if (dep.getValue().data().isEmpty()) { - Log.trace("Checking dependency " + dep.getKey() + " - not found"); + for (var read : reads.entrySet()) { + var dep = dependenciesLocked.get(read.getKey()); + + if (dep.data().isEmpty()) { + Log.trace("Checking read dependency " + read.getKey() + " - not found"); continue; } - if (dep.getValue().data().get().version() >= tx.getId()) { - Log.trace("Checking dependency " + dep.getKey() + " - newer than"); - throw new TxCommitException("Serialization hazard: " + dep.getValue().data().get().version() + " vs " + tx.getId()); + if (dep.data().get().version() >= tx.getId()) { + Log.trace("Checking dependency " + read.getKey() + " - newer than"); + throw new TxCommitException("Serialization hazard: " + dep.data().get().version() + " vs " + tx.getId()); } - var read = reads.get(dep.getKey()); - if (read != null && read.data().orElse(null) != dep.getValue().data().orElse(null)) { - Log.trace("Checking dependency " + dep.getKey() + " - read mismatch"); - throw new TxCommitException("Read mismatch for " + dep.getKey() + ": " + read + " vs " + dep.getValue()); - } - - Log.trace("Checking dependency " + dep.getKey() + " - ok with read " + read); + Log.trace("Checking dependency " + read.getKey() + " - ok with read"); } Log.tracef("Flushing transaction %d to storage", tx.getId()); for (var action : current.entrySet()) { + var dep = dependenciesLocked.get(action.getKey()); + if (dep.data().isPresent() && dep.data.get().version() >= tx.getId()) { + Log.trace("Skipping write " + action.getKey() + " - dependency " + dep.data().get().version() + " vs " + tx.getId()); + continue; + } + switch (action.getValue()) { case TxRecord.TxObjectRecordWrite write -> { - Log.trace("Flushing object " + action.getKey()); + Log.trace("Writing " + action.getKey()); var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); } case TxRecord.TxObjectRecordDeleted deleted -> { - Log.trace("Deleting object " + action.getKey()); + Log.trace("Deleting " + action.getKey()); _objects.remove(action.getKey()); } default -> { @@ -225,6 +253,7 @@ public class 
JObjectManager { } public void rollback(TransactionPrivate tx) { + verifyReady(); Log.trace("Rolling back transaction " + tx.getId()); tx.reads().forEach((key, value) -> { if (value instanceof TransactionObjectLocked locked) { @@ -271,25 +300,21 @@ public class JObjectManager { @Override public TransactionObject get(Class type, JObjectKey key) { - return getObj(type, key); -// return getObj(type, key).map(got -> { -// if (got.data().getVersion() > _txId) { -// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); -// } -// return got; -// }); + var got = getObj(type, key); + if (got.data().isPresent() && got.data().get().version() > _txId) { + throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); + } + return got; } @Override public TransactionObject getWriteLocked(Class type, JObjectKey key) { - return getObjLock(type, key); -// return getObjLock(type, key).map(got -> { -// if (got.data().getVersion() > _txId) { -// got.lock.close(); -// throw new IllegalStateException("Serialization race for " + key + ": " + got.data().getVersion() + " vs " + _txId); -// } -// return got; -// }); + var got = getObjLock(type, key); + if (got.data().isPresent() && got.data().get().version() > _txId) { + got.lock().close(); + throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); + } + return got; } } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index f4a5bc8d..ffff3751 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -19,11 +19,11 @@ public interface TransactionManager { } begin(); + T ret; try { - var ret = supplier.get(); - commit(); - return ret; + ret = supplier.get(); } catch (TxCommitException txCommitException) { + rollback(); if (tries == 0) { Log.error("Transaction commit failed", txCommitException); throw txCommitException; @@ -33,6 +33,16 @@ public interface TransactionManager { rollback(); throw e; } + try { + commit(); + return ret; + } catch (TxCommitException txCommitException) { + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + return runTries(supplier, tries - 1); + } } default void runTries(VoidFn fn, int tries) { @@ -44,6 +54,19 @@ public interface TransactionManager { begin(); try { fn.apply(); + } catch (TxCommitException txCommitException) { + rollback(); + if (tries == 0) { + Log.error("Transaction commit failed", txCommitException); + throw txCommitException; + } + runTries(fn, tries - 1); + return; + } catch (Throwable e) { + rollback(); + throw e; + } + try { commit(); } catch (TxCommitException txCommitException) { if (tries == 0) { @@ -51,9 +74,6 @@ public interface TransactionManager { throw txCommitException; } runTries(fn, tries - 1); - } catch (Throwable e) { - rollback(); - throw e; } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index c603a61b..66ad87b4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; -import com.usatiuk.dhfs.objects.persistence.TxManifest; +import com.usatiuk.dhfs.objects.persistence.TxManifestObj; import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; @@ -11,10 +11,14 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import java.util.*; -import java.util.concurrent.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; @ApplicationScoped @@ -108,39 +112,27 @@ public class TxWritebackImpl implements TxWriteback { } } - var latch = new CountDownLatch((int) bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.CommittedEntry).count()); - ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); + var toWrite = new ArrayList>>(); + var toDelete = new ArrayList(); for (var e : bundle._entries.values()) { switch (e) { - case TxBundleImpl.CommittedEntry c -> _commitExecutor.execute(() -> { - try { - Log.trace("Writing new " + c.key()); - objectPersistentStore.writeObject(c.key(), c.data()); - } catch (Throwable t) { - Log.error("Error writing " + c.key(), t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - case TxBundleImpl.DeletedEntry d -> { - if (Log.isDebugEnabled()) - Log.debug("Deleting from persistent storage " + d.key()); // FIXME: For tests + case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { + Log.trace("Writing new " + key); + toWrite.add(Pair.of(key, data)); + } + case TxBundleImpl.DeletedEntry(JObjectKey key) -> { + Log.trace("Deleting from persistent storage " + key); + toDelete.add(key); } default -> throw new IllegalStateException("Unexpected value: " + e); } } - latch.await(); - if (!errors.isEmpty()) { - throw new RuntimeException("Errors in writeback!"); - } - objectPersistentStore.commitTx( - new TxManifest( - bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.CommittedEntry).map(TxBundleImpl.BundleEntry::key).toList(), - bundle._entries.values().stream().filter(e -> e instanceof TxBundleImpl.DeletedEntry).map(TxBundleImpl.BundleEntry::key).toList() + new TxManifestObj<>( + Collections.unmodifiableList(toWrite), + Collections.unmodifiableList(toDelete) )); Log.trace("Bundle " + bundle.getId() + " committed"); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index fa6f8799..127cdde7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -7,6 +7,7 @@ import io.quarkus.logging.Log; import io.quarkus.runtime.Startup; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; 
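The writeback commit path above no longer writes each object on its own executor task behind a CountDownLatch; a bundle is drained into one manifest and handed to the persistent store in a single commitTx call. A rough sketch of the handoff — the variable names are placeholders and the generic arguments an assumption, since the manifest records' type parameters are elided later in this patch:

    // Hypothetical shape of one flushed bundle: new versions plus removed keys.
    var toWrite = List.of(Pair.of(someKey, someWrappedData));
    var toDelete = List.of(someRemovedKey);
    objectPersistentStore.commitTx(new TxManifestObj<>(toWrite, toDelete));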
import org.eclipse.microprofile.config.inject.ConfigProperty; import javax.annotation.Nonnull; @@ -89,15 +90,12 @@ public class CachingObjectPersistentStore { } } - public void writeObject(JObjectKey name, JDataVersionedWrapper object) { - delegate.writeObject(name, object); - } - - public void commitTx(TxManifest names) { + public void commitTx(TxManifestObj> names) { // During commit, readObject shouldn't be called for these items, // it should be handled by the upstream store synchronized (_cache) { - for (var key : Stream.concat(names.written().stream(), names.deleted().stream()).toList()) { + for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), + names.deleted().stream()).toList()) { _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); _cache.remove(key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java index 1549c4e2..b668534c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java @@ -16,7 +16,6 @@ import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import net.openhft.hashing.LongHashFunction; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; import org.eclipse.microprofile.config.inject.ConfigProperty; import javax.annotation.Nonnull; @@ -27,10 +26,11 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.*; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.stream.Stream; import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; @@ -50,7 +50,7 @@ public class FileObjectPersistentStore implements ObjectPersistentStore { private final Path _txManifest; private ExecutorService _flushExecutor; private RandomAccessFile _txFile; - private volatile boolean _ready = false; + private boolean _ready = false; public FileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { this._root = Path.of(root).resolve("objects"); @@ -69,13 +69,7 @@ public class FileObjectPersistentStore implements ObjectPersistentStore { Files.createFile(_txManifest); } _txFile = new RandomAccessFile(_txManifest.toFile(), "rw"); - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("persistent-commit-%d") - .build(); - - _flushExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } + _flushExecutor = Executors.newVirtualThreadPerTaskExecutor(); tryReplay(); Log.info("Transaction replay done"); @@ -181,18 +175,7 @@ public class FileObjectPersistentStore implements ObjectPersistentStore { } } - @Override - public void writeObject(JObjectKey name, ByteString obj) { - verifyReady(); - try { - var tmpPath = getTmpObjPath(name); - writeObjectImpl(tmpPath, obj, true); - } catch (IOException e) { - Log.error("Error writing new file " + name, e); - } - } - - private TxManifest 
readTxManifest() { + private TxManifestRaw readTxManifest() { try { var channel = _txFile.getChannel(); @@ -219,7 +202,7 @@ public class FileObjectPersistentStore implements ObjectPersistentStore { } } - private void putTxManifest(TxManifest manifest) { + private void putTxManifest(TxManifestRaw manifest) { try { var channel = _txFile.getChannel(); var data = SerializationHelper.serializeArray(manifest); @@ -237,62 +220,58 @@ public class FileObjectPersistentStore implements ObjectPersistentStore { } @Override - public void commitTx(TxManifest manifest) { + public void commitTx(TxManifestRaw manifest) { verifyReady(); + try { + _flushExecutor.invokeAll( + manifest.written().stream().map(p -> (Callable) () -> { + var tmpPath = getTmpObjPath(p.getKey()); + writeObjectImpl(tmpPath, p.getValue(), true); + return null; + }).toList() + ).forEach(p -> { + try { + p.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } commitTxImpl(manifest, true); } - public void commitTxImpl(TxManifest manifest, boolean failIfNotFound) { + public void commitTxImpl(TxManifestRaw manifest, boolean failIfNotFound) { + if (manifest.deleted().isEmpty() && manifest.written().isEmpty()) { + Log.debug("Empty manifest, skipping"); + return; + } + + putTxManifest(manifest); + try { - if (manifest.deleted().isEmpty() && manifest.written().isEmpty()) { - Log.debug("Empty manifest, skipping"); - return; - } - - putTxManifest(manifest); - - var latch = new CountDownLatch(manifest.written().size() + manifest.deleted().size()); - ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); - - for (var n : manifest.written()) { - _flushExecutor.execute(() -> { - try { - Files.move(getTmpObjPath(n), getObjPath(n), ATOMIC_MOVE, REPLACE_EXISTING); - } catch (Throwable t) { - if (!failIfNotFound && (t instanceof NoSuchFileException)) return; - Log.error("Error writing " + n, t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - for (var d : manifest.deleted()) { - _flushExecutor.execute(() -> { - try { - deleteImpl(getObjPath(d)); - } catch (Throwable t) { - Log.error("Error deleting " + d, t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - - latch.await(); - - if (!errors.isEmpty()) { - throw new RuntimeException("Errors when commiting tx!"); - } - - // No real need to truncate here -// try (var channel = _txFile.getChannel()) { -// channel.truncate(0); -// } -// } catch (IOException e) { -// Log.error("Failed committing transaction to disk: ", e); -// throw new RuntimeException(e); + _flushExecutor.invokeAll( + Stream.concat(manifest.written().stream().map(p -> (Callable) () -> { + try { + Files.move(getTmpObjPath(p.getKey()), getObjPath(p.getKey()), ATOMIC_MOVE, REPLACE_EXISTING); + } catch (NoSuchFileException n) { + if (failIfNotFound) + throw n; + } + return null; + }), + manifest.deleted().stream().map(p -> (Callable) () -> { + deleteImpl(getObjPath(p)); + return null; + })).toList() + ).forEach(p -> { + try { + p.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }); } catch (InterruptedException e) { throw new RuntimeException(e); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index dd73ce6a..cc7bd59e 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -15,7 +15,6 @@ import java.util.Optional; @IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory") public class MemoryObjectPersistentStore implements ObjectPersistentStore { private final Map _objects = new HashMap<>(); - private final Map _pending = new HashMap<>(); @Nonnull @Override @@ -34,17 +33,10 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore { } @Override - public void writeObject(JObjectKey name, ByteString object) { + public void commitTx(TxManifestRaw names) { synchronized (this) { - _pending.put(name, object); - } - } - - @Override - public void commitTx(TxManifest names) { - synchronized (this) { - for (JObjectKey key : names.written()) { - _objects.put(key, _pending.get(key)); + for (var written : names.written()) { + _objects.put(written.getKey(), written.getValue()); } for (JObjectKey key : names.deleted()) { _objects.remove(key); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index f1db0be4..19fe5d42 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -16,9 +16,7 @@ public interface ObjectPersistentStore { @Nonnull Optional readObject(JObjectKey name); - void writeObject(JObjectKey name, ByteString object); - - void commitTx(TxManifest names); + void commitTx(TxManifestRaw names); long getTotalSpace(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index 318c025a..99abf09c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -5,6 +5,7 @@ import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.ObjectSerializer; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.Collection; @@ -28,11 +29,11 @@ public class SerializingObjectPersistentStore { return delegate.readObject(name).map(serializer::deserialize); } - void writeObject(JObjectKey name, JDataVersionedWrapper object) { - delegate.writeObject(name, serializer.serialize(object)); - } - - void commitTx(TxManifest names) { - delegate.commitTx(names); + void commitTx(TxManifestObj> names) { + delegate.commitTx(new TxManifestRaw( + names.written().stream() + .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue()))) + .toList() + , names.deleted())); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java deleted file mode 100644 index bd855980..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifest.java +++ /dev/null @@ -1,10 +0,0 @@ -package 
com.usatiuk.dhfs.objects.persistence; - -import com.usatiuk.dhfs.objects.JObjectKey; - -import java.io.Serializable; -import java.util.Collection; - -// FIXME: Serializable -public record TxManifest(Collection<JObjectKey> written, Collection<JObjectKey> deleted) implements Serializable { -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java new file mode 100644 index 00000000..19bc6e36 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestObj.java @@ -0,0 +1,12 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.tuple.Pair; + +import java.io.Serializable; +import java.util.Collection; + +// FIXME: Serializable +public record TxManifestObj<T>(Collection<Pair<JObjectKey, T>> written, + Collection<JObjectKey> deleted) implements Serializable { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java new file mode 100644 index 00000000..fd7ec742 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/TxManifestRaw.java @@ -0,0 +1,13 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.tuple.Pair; + +import java.io.Serializable; +import java.util.Collection; + +// FIXME: Serializable +public record TxManifestRaw(Collection<Pair<JObjectKey, ByteString>> written, + Collection<JObjectKey> deleted) implements Serializable { +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index e5d3e83d..1c3db657 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -7,6 +7,7 @@ import io.quarkus.logging.Log; import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -113,6 +114,7 @@ public class ObjectsTest { } @Test + @Disabled void createObjectConflict() throws InterruptedException { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java index fcb5a07e..aeea40b5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java @@ -24,40 +24,52 @@ import java.util.Date; public class CertificateTools { - public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException { - CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - InputStream in = new ByteArrayInputStream(bytes); - return (X509Certificate) certFactory.generateCertificate(in); + public static X509Certificate certFromBytes(byte[] bytes) { + try { + CertificateFactory certFactory =
CertificateFactory.getInstance("X.509"); + InputStream in = new ByteArrayInputStream(bytes); + return (X509Certificate) certFactory.generateCertificate(in); + } catch (CertificateException e) { + throw new RuntimeException(e); + } } - public static KeyPair generateKeyPair() throws NoSuchAlgorithmException { - KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); - keyGen.initialize(2048); //FIXME: - return keyGen.generateKeyPair(); + public static KeyPair generateKeyPair() { + try { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(2048); //FIXME: + return keyGen.generateKeyPair(); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } } - public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException { - Provider bcProvider = new BouncyCastleProvider(); - Security.addProvider(bcProvider); + public static X509Certificate generateCertificate(KeyPair keyPair, String subject) { + try { + Provider bcProvider = new BouncyCastleProvider(); + Security.addProvider(bcProvider); - Date startDate = new Date(); + Date startDate = new Date(); - X500Name cnName = new X500Name("CN=" + subject); - BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject)); + X500Name cnName = new X500Name("CN=" + subject); + BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject)); - Calendar calendar = Calendar.getInstance(); - calendar.setTime(startDate); - calendar.add(Calendar.YEAR, 999); + Calendar calendar = Calendar.getInstance(); + calendar.setTime(startDate); + calendar.add(Calendar.YEAR, 999); - Date endDate = calendar.getTime(); + Date endDate = calendar.getTime(); - ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate()); + ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate()); - JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic()); + JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic()); - BasicConstraints basicConstraints = new BasicConstraints(false); - certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints); + BasicConstraints basicConstraints = new BasicConstraints(false); + certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints); - return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner)); + return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner)); + } catch (OperatorCreationException | CertificateException | CertIOException e) { + throw new RuntimeException(e); + } } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 523aa7cc..3f3f0eae 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -16,6 +16,7 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import java.io.IOException; import 
java.security.KeyPair; +import java.security.cert.CertificateEncodingException; import java.security.cert.X509Certificate; import java.util.Optional; import java.util.UUID; @@ -54,15 +55,15 @@ public class PersistentPeerDataService { _selfKeyPair = selfData.selfKeyPair(); return; } else { - _selfUuid = presetUuid.map(s -> PeerId.of(UUID.fromString(s))).orElseGet(() -> PeerId.of(UUID.randomUUID())); try { + _selfUuid = presetUuid.map(s -> PeerId.of(UUID.fromString(s))).orElseGet(() -> PeerId.of(UUID.randomUUID())); Log.info("Generating a key pair, please wait"); _selfKeyPair = CertificateTools.generateKeyPair(); _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); curTx.put(new PersistentRemoteHostsData(_selfUuid, 0, _selfCertificate, _selfKeyPair)); peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); - } catch (Exception e) { + } catch (CertificateEncodingException e) { throw new RuntimeException(e); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java index cc9d4586..f8b7fcf2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -1,14 +1,10 @@ package com.usatiuk.dhfs.objects.repository.peersync; -import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.persistence.PeerInfoP; import com.usatiuk.dhfs.objects.repository.CertificateTools; -import java.security.cert.CertificateException; import java.security.cert.X509Certificate; public record PeerInfo(JObjectKey key, PeerId id, byte[] cert) implements JDataRemote { @@ -17,10 +13,6 @@ public record PeerInfo(JObjectKey key, PeerId id, byte[] cert) implements JDataR } public X509Certificate parsedCert() { - try { - return CertificateTools.certFromBytes(cert); - } catch (CertificateException e) { - throw new RuntimeException(e); - } + return CertificateTools.certFromBytes(cert); } } From b554e6c96fe2df3b6479bdda9b2d129cbb48afb2 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 15 Feb 2025 15:09:00 +0100 Subject: [PATCH 058/105] hopefully working deletion todo: mark seen --- .../usatiuk/dhfs/objects/DeleterTxHook.java | 20 +- .../usatiuk/dhfs/objects/RemoteObject.java | 3 +- .../dhfs/objects/RemoteObjectDeleter.java | 231 ++++++++++++++++++ .../dhfs/objects/RemoteObjectMeta.java | 4 +- .../repository/RemoteObjectServiceClient.java | 55 +++-- .../repository/RemoteObjectServiceServer.java | 78 +++--- .../repository/invalidation/OpPusher.java | 4 + 7 files changed, 305 insertions(+), 90 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index 5fae3165..25354d8c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -9,6 +9,8 @@ import jakarta.inject.Inject; public class DeleterTxHook implements PreCommitTxHook { @Inject Transaction 
curTx; + @Inject + RemoteObjectDeleter remoteObjectDeleter; private boolean canDelete(JDataRefcounted data) { return !data.frozen() && data.refsFrom().isEmpty(); @@ -16,14 +18,14 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onChange(JObjectKey key, JData old, JData cur) { - if (cur instanceof RemoteObject) { - return; // FIXME: - } if (!(cur instanceof JDataRefcounted refCur)) { return; } - if (canDelete(refCur)) { + if (refCur instanceof RemoteObject ro) { + remoteObjectDeleter.putDeletionCandidate(ro); + return; + } Log.trace("Deleting object on change: " + key); curTx.delete(key); } @@ -31,14 +33,15 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onCreate(JObjectKey key, JData cur) { - if (cur instanceof RemoteObject) { - return; - } if (!(cur instanceof JDataRefcounted refCur)) { return; } if (canDelete(refCur)) { + if (refCur instanceof RemoteObject ro) { + remoteObjectDeleter.putDeletionCandidate(ro); + return; + } Log.warn("Deleting object on creation: " + key); curTx.delete(key); } @@ -46,9 +49,6 @@ public class DeleterTxHook implements PreCommitTxHook { @Override public void onDelete(JObjectKey key, JData cur) { - if (cur instanceof RemoteObject) { - return; - } if (!(cur instanceof JDataRefcounted refCur)) { return; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java index a965c1ba..ce9a99fd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java @@ -11,8 +11,9 @@ import java.util.List; public record RemoteObject(PCollection refsFrom, boolean frozen, RemoteObjectMeta meta, @Nullable T data) implements JDataRefcounted { + // Self put public RemoteObject(T data, PeerId initialPeer) { - this(HashTreePSet.empty(), false, new RemoteObjectMeta(data.key(), data.getClass(), initialPeer), data); + this(HashTreePSet.empty(), false, new RemoteObjectMeta(data.key(), data.getClass(), false, initialPeer), data); } public RemoteObject(JObjectKey key, PMap remoteChangelog) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java new file mode 100644 index 00000000..c48559a0 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java @@ -0,0 +1,231 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfo; +import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; 
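+// Deletion flow in brief: DeleterTxHook above now routes unreferenced
+// RemoteObjects here instead of deleting them inline. Objects no other peer
+// has seen go onto the zero-delay "quick" queue and are dropped immediately;
+// everything else waits out the configured deletion delay plus a canDelete
+// round-trip to every peer that has not yet confirmed the deletion.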
+import java.util.stream.Stream; + +@ApplicationScoped +public class RemoteObjectDeleter { + private final HashSetDelayedBlockingQueue _quickCandidates = new HashSetDelayedBlockingQueue<>(0); + private final HashSetDelayedBlockingQueue _candidates; + private final HashSetDelayedBlockingQueue _canDeleteRetries; + private final HashSet _movablesInProcessing = new HashSet<>(); + + @Inject + TransactionManager txm; + @Inject + Transaction curTx; + @Inject + RemoteTransaction remoteTx; + @Inject + PeerInfoService peerInfoService; + @Inject + RemoteObjectServiceClient remoteObjectServiceClient; + + @ConfigProperty(name = "dhfs.objects.move-processor.threads") + int moveProcessorThreads; + @ConfigProperty(name = "dhfs.objects.ref-processor.threads") + int refProcessorThreads; + @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") + long canDeleteRetryDelay; + + private ExecutorService _movableProcessorExecutorService; + private ExecutorService _refProcessorExecutorService; + + public RemoteObjectDeleter(@ConfigProperty(name = "dhfs.objects.deletion.delay") long deletionDelay, + @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") long canDeleteRetryDelay) { + _candidates = new HashSetDelayedBlockingQueue<>(deletionDelay); + _canDeleteRetries = new HashSetDelayedBlockingQueue<>(canDeleteRetryDelay); + } + + void init(@Observes @Priority(200) StartupEvent event) throws IOException { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("move-proc-%d") + .build(); + _movableProcessorExecutorService = Executors.newFixedThreadPool(moveProcessorThreads, factory); + + BasicThreadFactory factoryRef = new BasicThreadFactory.Builder() + .namingPattern("ref-proc-%d") + .build(); + _refProcessorExecutorService = Executors.newFixedThreadPool(refProcessorThreads, factoryRef); + for (int i = 0; i < refProcessorThreads; i++) { + _refProcessorExecutorService.submit(this::refProcessor); + } + + // Continue GC from last shutdown + //FIXME +// executorService.submit(() -> +// jObjectManager.findAll().forEach(n -> { +// jObjectManager.get(n).ifPresent(o -> o.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { +// return null; +// })); +// })); + } + + void shutdown(@Observes @Priority(800) ShutdownEvent event) throws InterruptedException { + _refProcessorExecutorService.shutdownNow(); + if (!_refProcessorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) { + Log.error("Refcounting threads didn't exit in 30 seconds"); + } + } + +// public void putQuickDeletionCandidate(JObjectKey obj) { +// _quickCandidates.add(obj); +// } + + public void putDeletionCandidate(RemoteObject obj) { + synchronized (_movablesInProcessing) { + if (_movablesInProcessing.contains(obj.key())) return; + if (!obj.meta().seen()) { + if (_quickCandidates.add(obj.key())) + Log.debug("Quick deletion candidate: " + obj.key()); + return; + } + if (_candidates.add(obj.key())) + Log.debug("Deletion candidate: " + obj.key()); + } + } + + private void asyncProcessMovable(JObjectKey objName) { + synchronized (_movablesInProcessing) { + if (_movablesInProcessing.contains(objName)) return; + _movablesInProcessing.add(objName); + } + Log.debugv("Async processing of remote obj del: {0}", objName); + + _movableProcessorExecutorService.submit(() -> { + boolean delay = true; + try { + delay = txm.run(() -> { + Log.debugv("Starting async processing of remote obj del: {0}", objName); + RemoteObject target = curTx.get(RemoteObject.class, objName).orElse(null); + if 
(target == null) return true; + + if (canDelete(target)) { + Log.debugv("Async processing of remote obj del: immediate {0}", objName); + curTx.delete(objName); + return true; + } + var knownHosts = peerInfoService.getPeersNoSelf(); + List missing = knownHosts.stream() + .map(PeerInfo::id) + .filter(id -> !target.meta().confirmedDeletes().contains(id)).toList(); + + var ret = remoteObjectServiceClient.canDelete(missing, objName, target.refsFrom()); + + long ok = 0; + + for (var r : ret) { + if (!r.getDeletionCandidate()) { +// for (var rr : r.getReferrersList()) +// autoSyncProcessor.add(rr); + } else { + ok++; + } + } + + if (ok != missing.size()) { + Log.debugv("Delaying deletion check of {0}", objName); + return true; + } else { + assert canDelete(target); + Log.debugv("Async processing of remote obj del: after query {0}", objName); + curTx.delete(objName); + return false; + } + }); + } finally { + synchronized (_movablesInProcessing) { + _movablesInProcessing.remove(objName); + if (!delay) + _candidates.add(objName); + else + _canDeleteRetries.add(objName); + } + } + }); + } + + // Returns true if the object can be deleted + private boolean canDelete(RemoteObject obj) { + if (!obj.meta().seen()) + return true; + + var knownHosts = peerInfoService.getPeers(); + boolean missing = false; + for (var x : knownHosts) { + if (!obj.meta().confirmedDeletes().contains(x.id())) { + missing = true; + break; + } + } + return !missing; + } + + private void refProcessor() { + while (true) { + try { + while (!Thread.interrupted()) { + JObjectKey next = null; + JObjectKey nextQuick = null; + + while (next == null && nextQuick == null) { + nextQuick = _quickCandidates.tryGet(); + + if (nextQuick != null) break; + + next = _canDeleteRetries.tryGet(); + if (next == null) + next = _candidates.tryGet(); + if (next == null) + nextQuick = _quickCandidates.get(canDeleteRetryDelay); + } + + Stream.of(next, nextQuick).filter(Objects::nonNull).forEach(realNext -> { + Log.debugv("Processing remote object deletion candidate: {0}", realNext); + var deleted = txm.run(() -> { + RemoteObject target = curTx.get(RemoteObject.class, realNext).orElse(null); + if (target == null) return true; + + if (canDelete(target)) { + Log.debugv("Immediate deletion of: {0}", realNext); + curTx.delete(realNext); + return true; + } + + return false; + }); + if (!deleted) + asyncProcessMovable(realNext); + }); + } + } catch (InterruptedException ignored) { + return; + } catch (Throwable error) { + Log.error("Exception in refcounter thread", error); + } + Log.info("JObject Refcounter thread exiting"); + } + } + +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java index 2642525a..ea360a06 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java @@ -14,8 +14,8 @@ public record RemoteObjectMeta( PSet confirmedDeletes, boolean seen, PMap changelog) implements Serializable { - public RemoteObjectMeta(JObjectKey key, Class type, PeerId initialPeer) { - this(key, HashTreePMap.empty(), type, HashTreePSet.empty(), true, + public RemoteObjectMeta(JObjectKey key, Class type, boolean seen, PeerId initialPeer) { + this(key, HashTreePMap.empty(), type, HashTreePSet.empty(), seen, HashTreePMap.empty().plus(initialPeer, 1L)); } diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java index d591bcb7..1d9606c0 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -12,9 +12,15 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; +import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.function.Function; +import java.util.stream.Collectors; @ApplicationScoped public class RemoteObjectServiceClient { @@ -43,6 +49,8 @@ public class RemoteObjectServiceClient { @Inject ProtoSerializer receivedObjectProtoSerializer; + private final ExecutorService _batchExecutor = Executors.newVirtualThreadPerTaskExecutor(); + // public Pair getSpecificObject(UUID host, String name) { // return rpcClientFactory.withObjSyncClient(host, client -> { // var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); @@ -151,31 +159,24 @@ public class RemoteObjectServiceClient { return OpPushReply.getDefaultInstance(); } -// public Collection canDelete(Collection targets, String object, Collection ourReferrers) { -// ConcurrentLinkedDeque results = new ConcurrentLinkedDeque<>(); -// Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); -// try (var executor = Executors.newVirtualThreadPerTaskExecutor()) { -// try { -// executor.invokeAll(targets.stream().>map(h -> () -> { -// try { -// var req = CanDeleteRequest.newBuilder() -// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) -// .setName(object); -// req.addAllOurReferrers(ourReferrers); -// var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build())); -// if (res != null) -// results.add(res); -// } catch (Exception e) { -// Log.debug("Error when asking canDelete for object " + object, e); -// } -// return null; -// }).toList()); -// } catch (InterruptedException e) { -// Log.warn("Interrupted waiting for canDelete for object " + object); -// } -// if (!executor.shutdownNow().isEmpty()) -// Log.warn("Didn't ask all targets when asking canDelete for " + object); -// } -// return results; -// } + public Collection canDelete(Collection targets, JObjectKey object, Collection ourReferrers) { + Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(PeerId::toString).collect(Collectors.joining(", "))); + try { + return _batchExecutor.invokeAll(targets.stream().>map(h -> () -> { + var req = CanDeleteRequest.newBuilder().setName(object.toString()); + for (var ref : ourReferrers) { + req.addOurReferrers(ref.toString()); + } + return rpcClientFactory.withObjSyncClient(h, (p, client) -> client.canDelete(req.build())); + }).toList()).stream().map(f -> { + try { + return f.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }).toList(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } } diff --git 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index e4b6b468..b4cb82c6 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -15,7 +15,6 @@ import io.smallrye.common.annotation.Blocking; import io.smallrye.mutiny.Uni; import jakarta.annotation.security.RolesAllowed; import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; // Note: RunOnVirtualThread hangs somehow @GrpcService @@ -101,58 +100,37 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { } @Override + @Blocking public Uni canDelete(CanDeleteRequest request) { - throw new NotImplementedException(); + var peerId = identity.getPrincipal().getName().substring(3); + + Log.info("<-- canDelete: " + request.getName() + " from " + peerId); + + var builder = CanDeleteReply.newBuilder(); + builder.setObjName(request.getName()); + + txm.run(() -> { + var obj = curTx.get(RemoteObject.class, JObjectKey.of(request.getName())).orElse(null); + + if (obj == null) { + builder.setDeletionCandidate(true); + return; + } + + builder.setDeletionCandidate(!obj.frozen() && obj.refsFrom().isEmpty()); + + if (!builder.getDeletionCandidate()) + for (var r : obj.refsFrom()) + builder.addReferrers(r.toString()); + +// if (!ret.getDeletionCandidate()) +// for (var rr : request.getOurReferrersList()) +// autoSyncProcessor.add(rr); + }); + return Uni.createFrom().item(builder.build()); } -// @Override -// @Blocking -// public Uni canDelete(CanDeleteRequest request) { -// if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); -// if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) -// throw new StatusRuntimeException(Status.UNAUTHENTICATED); -// -// Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid()); -// -// var builder = CanDeleteReply.newBuilder(); -// -// var obj = jObjectManager.get(request.getName()); -// -// builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); -// builder.setObjName(request.getName()); -// -// if (obj.isPresent()) try { -// boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> { -// if (m.isDeleted() && !m.isDeletionCandidate()) -// throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate"); -// builder.setDeletionCandidate(m.isDeletionCandidate()); -// builder.addAllReferrers(m.getReferrers()); -// return m.isDeletionCandidate() && !m.isDeleted(); -// }); -// // FIXME - - /// / if (tryUpdate) { - /// / obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { - /// / return null; - /// / }); - /// / } -// } catch (DeletedObjectAccessException dox) { -// builder.setDeletionCandidate(true); -// } -// else { -// builder.setDeletionCandidate(true); -// } -// -// var ret = builder.build(); -// -// if (!ret.getDeletionCandidate()) -// for (var rr : request.getOurReferrersList()) -// autoSyncProcessor.add(rr); -// -// return Uni.createFrom().item(ret); -// } - -// @Override + // @Override // @Blocking // public Uni indexUpdate(IndexUpdatePush request) { // if (request.getSelfUuid().isBlank()) throw new 
StatusRuntimeException(Status.INVALID_ARGUMENT); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java index f653c265..acf06393 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -31,6 +31,10 @@ public class OpPusher { return new IndexUpdateOp(key, remote.meta().changelog()); } case JKleppmannTreePersistentData pd -> { + var maybeQueue = pd.queues().get(op); + if(maybeQueue == null || maybeQueue.isEmpty()) { + return null; + } var ret = new JKleppmannTreeOpWrapper(key, pd.queues().get(op).firstEntry().getValue()); var newPd = pd.withQueues(pd.queues().plus(op, pd.queues().get(op).minus(ret.op().timestamp()))); curTx.put(newPd); From ecee392a39349a6523d1ba8944321471d3cc7811 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 15 Feb 2025 22:02:31 +0100 Subject: [PATCH 059/105] fix deadlock and possible inconsistency in transactions --- .../usatiuk/dhfs/objects/JObjectManager.java | 44 ++++++++++--------- .../transaction/TransactionFactoryImpl.java | 5 +++ .../transaction/TransactionPrivate.java | 2 + 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index d72cc71d..eb4c466d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -18,6 +18,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.function.Function; +import java.util.stream.Stream; // Manages all access to com.usatiuk.dhfs.objects.JData objects. // In particular, it serves as a source of truth for what is committed to the backing storage. 
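// Commit protocol, as reworked by this patch: every key the transaction read
// or wrote is collected and locked in a single pass, sorted by key, so that
// concurrently committing transactions acquire locks in a consistent order,
// avoiding the lock-order deadlock this patch fixes:
//
//   Stream.concat(reads.keySet().stream(), current.keySet().stream())
//           .sorted(Comparator.comparing(JObjectKey::toString))
//           .forEach(addDependency);
//
// After the locks are taken, each read dependency is re-validated: if the
// object instance the transaction read is no longer the committed one, or a
// version >= this transaction's id was committed in the meantime, the commit
// aborts with a TxCommitException ("serialization hazard").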
@@ -130,27 +131,22 @@ public class JObjectManager { }); }; - Function getCurrent = - key -> switch (current.get(key)) { - case TxRecord.TxObjectRecordWrite write -> write.data(); - case TxRecord.TxObjectRecordDeleted deleted -> null; - case null -> { - var dep = dependenciesLocked.get(key); - if (dep == null) { - throw new TxCommitException("No dependency for " + key); - } - yield dep.data.map(JDataVersionedWrapper::data).orElse(null); - } - default -> { - throw new TxCommitException("Unexpected value: " + current.get(key)); - } - }; - // For existing objects: // Check that their version is not higher than the version of transaction being committed // TODO: check deletions, inserts try { try { + Function getCurrent = + key -> switch (current.get(key)) { + case TxRecord.TxObjectRecordWrite write -> write.data(); + case TxRecord.TxObjectRecordDeleted deleted -> null; + case null -> + tx.readSource().get(JData.class, key).data().map(JDataVersionedWrapper::data).orElse(null); + default -> { + throw new TxCommitException("Unexpected value: " + current.get(key)); + } + }; + boolean somethingChanged; do { somethingChanged = false; @@ -160,10 +156,6 @@ public class JObjectManager { currentIteration.put(n.key(), n); Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass()); - currentIteration.keySet().stream() - .sorted(Comparator.comparing(JObjectKey::toString)) - .forEach(addDependency); - for (var entry : currentIteration.entrySet()) { // FIXME: Kinda hack? if (entry.getKey().equals(JDataDummy.TX_ID_OBJ_NAME)) { @@ -191,8 +183,12 @@ public class JObjectManager { } while (somethingChanged); } finally { reads = tx.reads(); + + Stream.concat(reads.keySet().stream(), current.keySet().stream()) + .sorted(Comparator.comparing(JObjectKey::toString)) + .forEach(addDependency); + for (var read : reads.entrySet()) { - addDependency.accept(read.getKey()); if (read.getValue() instanceof TransactionObjectLocked locked) { toUnlock.add(locked.lock); } @@ -207,7 +203,13 @@ public class JObjectManager { continue; } + if (dep.data().orElse(null) != read.getValue().data().orElse(null)) { + Log.trace("Checking dependency " + read.getKey() + " - changed already"); + throw new TxCommitException("Serialization hazard: " + dep.data().get().version() + " vs " + tx.getId()); + } + if (dep.data().get().version() >= tx.getId()) { + assert false; Log.trace("Checking dependency " + read.getKey() + " - newer than"); throw new TxCommitException("Serialization hazard: " + dep.data().get().version() + " vs " + tx.getId()); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 5cef472d..f7357822 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -97,6 +97,11 @@ public class TransactionFactoryImpl implements TransactionFactory { public Map> reads() { return _source.getRead(); } + + @Override + public ReadTrackingObjectSource readSource() { + return _source; + } } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 4229e939..e7ca7d05 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java
@@ -10,4 +10,6 @@ public interface TransactionPrivate extends Transaction {
     Collection<TxRecord.TxObjectRecord<?>> drainNewWrites();
 
     Map<JObjectKey, TransactionObject<?>> reads();
+
+    ReadTrackingObjectSource readSource();
 }

From 206542337649dabf74f8c3a5dcd91bd2386019eb Mon Sep 17 00:00:00 2001
From: Stepan Usatiuk
Date: Sat, 15 Feb 2025 22:12:10 +0100
Subject: [PATCH 060/105] file sync not completely broken

---
 .../files/service/DhfsFileServiceImpl.java    |  20 +-
 .../dhfs/objects/ConflictResolver.java        |   2 +-
 .../usatiuk/dhfs/objects/DeleterTxHook.java   |   4 +-
 .../dhfs/objects/RefcounterTxHook.java        |   6 +-
 .../dhfs/objects/RemoteObjPusherTxHook.java   |   6 +-
 .../usatiuk/dhfs/objects/RemoteObject.java    |  72 -----
 .../dhfs/objects/RemoteObjectDataWrapper.java |  43 +++
 .../dhfs/objects/RemoteObjectDeleter.java     |  44 +--
 .../dhfs/objects/RemoteObjectMeta.java        |  94 ++++--
 .../dhfs/objects/RemoteTransaction.java       | 107 +++----
 .../repository/RemoteObjectServiceServer.java |  35 ++-
 .../dhfs/objects/repository/SyncHandler.java  | 275 +++++++++---------
 .../dhfs/objects/repository/SyncHelper.java   |  42 +++
 .../invalidation/IndexUpdateOp.java           |   6 +-
 .../repository/invalidation/OpPusher.java     |   4 +-
 .../invalidation/PushOpHandler.java           |   5 +-
 .../repository/peersync/PeerInfoService.java  |   2 +-
 .../com/usatiuk/dhfs/utils/DataLocker.java    |   1 +
 18 files changed, 434 insertions(+), 334 deletions(-)
 delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java
 create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java
 create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java

diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
index 53963c00..9172ee1c 100644
--- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
+++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
@@ -75,7 +75,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
 
     private ChunkData createChunk(ByteString bytes) {
         var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
-        remoteTx.put(newChunk);
+        remoteTx.putData(newChunk);
         return newChunk;
     }
 
@@ -104,7 +104,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
             var ref = curTx.get(JData.class, uuid).orElse(null);
             if (ref == null) return Optional.empty();
             GetattrRes ret;
-            if (ref instanceof RemoteObject r) {
+            if (ref instanceof RemoteObjectMeta r) {
                 var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
                 if (remote instanceof File f) {
                     ret = new GetattrRes(f.mTime(), f.cTime(), f.mode(), f.symlink() ?
GetattrType.SYMLINK : GetattrType.FILE); @@ -157,7 +157,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty(), false, 0); - remoteTx.put(f); + remoteTx.putData(f); try { getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId()); @@ -230,10 +230,10 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (dent instanceof JKleppmannTreeNode) { return true; - } else if (dent instanceof RemoteObject) { + } else if (dent instanceof RemoteObjectMeta) { var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null); if (remote instanceof File f) { - remoteTx.put(f.withMode(mode).withMTime(System.currentTimeMillis())); + remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis())); return true; } else { throw new IllegalArgumentException(uuid + " is not a file"); @@ -502,7 +502,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); - remoteTx.put(file); + remoteTx.putData(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -526,7 +526,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var oldChunks = file.chunks(); file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); - remoteTx.put(file); + remoteTx.putData(file); cleanupChunks(file, oldChunks.values()); updateFileSize(file); return true; @@ -587,7 +587,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); - remoteTx.put(file); + remoteTx.putData(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); return true; @@ -640,7 +640,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { "File not found for setTimes: " + fileUuid)) ); - remoteTx.put(file.withCTime(atimeMs).withMTime(mtimeMs)); + remoteTx.putData(file.withCTime(atimeMs).withMTime(mtimeMs)); return true; }); } @@ -657,7 +657,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (realSize != file.size()) { - remoteTx.put(file.withSize(realSize)); + remoteTx.putData(file.withSize(realSize)); } }); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java index 1faf082c..f3b1acc7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ConflictResolver.java @@ -1,5 +1,5 @@ package com.usatiuk.dhfs.objects; public interface ConflictResolver { - void resolve(PeerId fromPeer, RemoteObject ours, RemoteObject theirs); + void resolve(PeerId fromPeer, RemoteObjectMeta ours, RemoteObjectMeta theirs); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index 25354d8c..8917ef6c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -22,7 +22,7 @@ public class DeleterTxHook 
implements PreCommitTxHook { return; } if (canDelete(refCur)) { - if (refCur instanceof RemoteObject ro) { + if (refCur instanceof RemoteObjectMeta ro) { remoteObjectDeleter.putDeletionCandidate(ro); return; } @@ -38,7 +38,7 @@ public class DeleterTxHook implements PreCommitTxHook { } if (canDelete(refCur)) { - if (refCur instanceof RemoteObject ro) { + if (refCur instanceof RemoteObjectMeta ro) { remoteObjectDeleter.putDeletionCandidate(ro); return; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index e239b8f2..7cb22447 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -1,8 +1,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -19,8 +17,8 @@ public class RefcounterTxHook implements PreCommitTxHook { return found; } - if (cur instanceof RemoteObject || cur instanceof JKleppmannTreeNode) { - return new RemoteObject<>(key); + if (cur instanceof RemoteObjectMeta || cur instanceof JKleppmannTreeNode) { + return new RemoteObjectMeta(key); } else { return found; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java index e83bc163..47100484 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjPusherTxHook.java @@ -16,7 +16,7 @@ public class RemoteObjPusherTxHook implements PreCommitTxHook { @Override public void onChange(JObjectKey key, JData old, JData cur) { boolean invalidate = switch (cur) { - case RemoteObject remote -> !remote.meta().changelog().equals(((RemoteObject) old).meta().changelog()); + case RemoteObjectMeta remote -> !remote.changelog().equals(((RemoteObjectMeta) old).changelog()); case JKleppmannTreePersistentData pd -> !pd.queues().equals(((JKleppmannTreePersistentData) old).queues()); default -> false; }; @@ -28,7 +28,7 @@ public class RemoteObjPusherTxHook implements PreCommitTxHook { @Override public void onCreate(JObjectKey key, JData cur) { - if (!(cur instanceof RemoteObject remote)) { + if (!(cur instanceof RemoteObjectMeta remote)) { return; } @@ -37,7 +37,7 @@ public class RemoteObjPusherTxHook implements PreCommitTxHook { @Override public void onDelete(JObjectKey key, JData cur) { - if (!(cur instanceof RemoteObject remote)) { + if (!(cur instanceof RemoteObjectMeta remote)) { return; } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java deleted file mode 100644 index ce9a99fd..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObject.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import org.pcollections.HashTreePSet; -import org.pcollections.PCollection; -import org.pcollections.PMap; -import org.pcollections.TreePMap; - 
-import javax.annotation.Nullable; -import java.util.Collection; -import java.util.List; - -public record RemoteObject(PCollection refsFrom, boolean frozen, - RemoteObjectMeta meta, @Nullable T data) implements JDataRefcounted { - // Self put - public RemoteObject(T data, PeerId initialPeer) { - this(HashTreePSet.empty(), false, new RemoteObjectMeta(data.key(), data.getClass(), false, initialPeer), data); - } - - public RemoteObject(JObjectKey key, PMap remoteChangelog) { - this(HashTreePSet.empty(), false, new RemoteObjectMeta(key, remoteChangelog), null); - } - - public RemoteObject(JObjectKey key) { - this(HashTreePSet.empty(), false, new RemoteObjectMeta(key, TreePMap.empty()), null); - } - - @Override - public JObjectKey key() { - if (data != null && !data.key().equals(meta.key())) - throw new IllegalStateException("Corrupted object, key mismatch: " + meta.key() + " vs " + data.key()); - return meta.key(); - } - - @Override - public RemoteObject withRefsFrom(PCollection refs) { - return new RemoteObject<>(refs, frozen, meta, data); - } - - @Override - public RemoteObject withFrozen(boolean frozen) { - return new RemoteObject<>(refsFrom, frozen, meta, data); - } - - public RemoteObject withMeta(RemoteObjectMeta meta) { - return new RemoteObject<>(refsFrom, frozen, meta, data); - } - - public RemoteObject withData(T data) { - return new RemoteObject<>(refsFrom, frozen, meta, data); - } - - public RemoteObject withRefsFrom(PCollection refs, boolean frozen) { - return new RemoteObject<>(refs, frozen, meta, data); - } - - public ReceivedObject toReceivedObject() { - if (data == null) - throw new IllegalStateException("Cannot convert to ReceivedObject without data: " + meta.key()); - return new ReceivedObject(meta.key(), meta.changelog(), data); - } - - @Override - public Collection collectRefsTo() { - if (data != null) return data.collectRefsTo(); - return List.of(); - } - - @Override - public int estimateSize() { - return data == null ? 
1000 : data.estimateSize(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java new file mode 100644 index 00000000..12877b9b --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDataWrapper.java @@ -0,0 +1,43 @@ +package com.usatiuk.dhfs.objects; + +import org.pcollections.HashTreePSet; +import org.pcollections.PCollection; + +import java.util.Collection; + +public record RemoteObjectDataWrapper(PCollection refsFrom, + boolean frozen, + T data) implements JDataRefcounted { + public RemoteObjectDataWrapper(T data) { + this(HashTreePSet.empty(), false, data); + } + + @Override + public RemoteObjectDataWrapper withRefsFrom(PCollection refs) { + return new RemoteObjectDataWrapper<>(refs, frozen, data); + } + + @Override + public RemoteObjectDataWrapper withFrozen(boolean frozen) { + return new RemoteObjectDataWrapper<>(refsFrom, frozen, data); + } + + public RemoteObjectDataWrapper withData(T data) { + return new RemoteObjectDataWrapper<>(refsFrom, frozen, data); + } + + @Override + public JObjectKey key() { + return RemoteObjectMeta.ofDataKey(data.key()); + } + + @Override + public Collection collectRefsTo() { + return data.collectRefsTo(); + } + + @Override + public int estimateSize() { + return data.estimateSize(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java index c48559a0..9fe55a39 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java @@ -93,17 +93,14 @@ public class RemoteObjectDeleter { // _quickCandidates.add(obj); // } - public void putDeletionCandidate(RemoteObject obj) { - synchronized (_movablesInProcessing) { - if (_movablesInProcessing.contains(obj.key())) return; - if (!obj.meta().seen()) { - if (_quickCandidates.add(obj.key())) - Log.debug("Quick deletion candidate: " + obj.key()); - return; - } - if (_candidates.add(obj.key())) - Log.debug("Deletion candidate: " + obj.key()); + public void putDeletionCandidate(RemoteObjectMeta obj) { + if (!obj.seen()) { + if (_quickCandidates.add(obj.key())) + Log.debug("Quick deletion candidate: " + obj.key()); + return; } + if (_candidates.add(obj.key())) + Log.debug("Deletion candidate: " + obj.key()); } private void asyncProcessMovable(JObjectKey objName) { @@ -118,18 +115,20 @@ public class RemoteObjectDeleter { try { delay = txm.run(() -> { Log.debugv("Starting async processing of remote obj del: {0}", objName); - RemoteObject target = curTx.get(RemoteObject.class, objName).orElse(null); + RemoteObjectMeta target = curTx.get(RemoteObjectMeta.class, objName).orElse(null); if (target == null) return true; + if (!canDelete(target)) return true; - if (canDelete(target)) { + if (canDeleteImmediately(target)) { Log.debugv("Async processing of remote obj del: immediate {0}", objName); curTx.delete(objName); return true; } + var knownHosts = peerInfoService.getPeersNoSelf(); List missing = knownHosts.stream() .map(PeerInfo::id) - .filter(id -> !target.meta().confirmedDeletes().contains(id)).toList(); + .filter(id -> !target.confirmedDeletes().contains(id)).toList(); var ret = remoteObjectServiceClient.canDelete(missing, objName, target.refsFrom()); @@ -148,7 +147,7 @@ public class 
RemoteObjectDeleter { Log.debugv("Delaying deletion check of {0}", objName); return true; } else { - assert canDelete(target); + assert canDeleteImmediately(target); Log.debugv("Async processing of remote obj del: after query {0}", objName); curTx.delete(objName); return false; @@ -166,15 +165,20 @@ public class RemoteObjectDeleter { }); } + // FIXME: + private boolean canDelete(JDataRefcounted obj) { + return obj.refsFrom().isEmpty() && !obj.frozen(); + } + // Returns true if the object can be deleted - private boolean canDelete(RemoteObject obj) { - if (!obj.meta().seen()) + private boolean canDeleteImmediately(RemoteObjectMeta obj) { + if (!obj.seen()) return true; var knownHosts = peerInfoService.getPeers(); boolean missing = false; for (var x : knownHosts) { - if (!obj.meta().confirmedDeletes().contains(x.id())) { + if (!obj.confirmedDeletes().contains(x.id())) { missing = true; break; } @@ -204,10 +208,12 @@ public class RemoteObjectDeleter { Stream.of(next, nextQuick).filter(Objects::nonNull).forEach(realNext -> { Log.debugv("Processing remote object deletion candidate: {0}", realNext); var deleted = txm.run(() -> { - RemoteObject target = curTx.get(RemoteObject.class, realNext).orElse(null); + RemoteObjectMeta target = curTx.get(RemoteObjectMeta.class, realNext).orElse(null); if (target == null) return true; - if (canDelete(target)) { + if (!canDelete(target)) return true; + + if (canDeleteImmediately(target)) { Log.debugv("Immediate deletion of: {0}", realNext); curTx.delete(realNext); return true; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java index ea360a06..6db896a2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java @@ -1,53 +1,103 @@ package com.usatiuk.dhfs.objects; -import org.pcollections.HashTreePMap; -import org.pcollections.HashTreePSet; -import org.pcollections.PMap; -import org.pcollections.PSet; +import org.pcollections.*; -import java.io.Serializable; +import java.util.Collection; +import java.util.List; -public record RemoteObjectMeta( - JObjectKey key, - PMap knownRemoteVersions, - Class knownType, - PSet confirmedDeletes, - boolean seen, - PMap changelog) implements Serializable { - public RemoteObjectMeta(JObjectKey key, Class type, boolean seen, PeerId initialPeer) { - this(key, HashTreePMap.empty(), type, HashTreePSet.empty(), seen, - HashTreePMap.empty().plus(initialPeer, 1L)); +public record RemoteObjectMeta(PCollection refsFrom, boolean frozen, + JObjectKey key, + PMap knownRemoteVersions, + Class knownType, + PSet confirmedDeletes, + boolean seen, + PMap changelog, + boolean hasLocalData) implements JDataRefcounted { + // Self put + public RemoteObjectMeta(JDataRemote data, PeerId initialPeer) { + this(HashTreePSet.empty(), false, + data.key(), HashTreePMap.empty(), data.getClass(), HashTreePSet.empty(), false, + HashTreePMap.empty().plus(initialPeer, 1L), + true); } public RemoteObjectMeta(JObjectKey key, PMap remoteChangelog) { - this(key, HashTreePMap.empty(), JDataRemote.class, HashTreePSet.empty(), true, remoteChangelog); + this(HashTreePSet.empty(), false, + key, HashTreePMap.empty(), JDataRemote.class, HashTreePSet.empty(), true, + remoteChangelog, + false); + } + + public RemoteObjectMeta(JObjectKey key) { + this(HashTreePSet.empty(), false, + key, HashTreePMap.empty(), JDataRemote.class, 
HashTreePSet.empty(), true, + TreePMap.empty(), + false); + } + + @Override + public JObjectKey key() { + return ofMetaKey(key); + } + + public static JObjectKey ofMetaKey(JObjectKey key) { + return key; + } + + public static JObjectKey ofDataKey(JObjectKey key) { + return JObjectKey.of(key.name() + "_data"); + } + + public JObjectKey dataKey() { + return ofDataKey(key); + } + + @Override + public RemoteObjectMeta withRefsFrom(PCollection refs) { + return new RemoteObjectMeta(refs, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); + } + + @Override + public RemoteObjectMeta withFrozen(boolean frozen) { + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withKnownRemoteVersions(PMap knownRemoteVersions) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withKnownType(Class knownType) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withConfirmedDeletes(PSet confirmedDeletes) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withSeen(boolean seen) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withChangelog(PMap changelog) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, hasLocalData); } public RemoteObjectMeta withHaveLocal(boolean haveLocal) { - return new RemoteObjectMeta(key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog); + return new RemoteObjectMeta(refsFrom, frozen, key, knownRemoteVersions, knownType, confirmedDeletes, seen, changelog, haveLocal); } public long versionSum() { return changelog.values().stream().mapToLong(Long::longValue).sum(); } + + @Override + public Collection collectRefsTo() { + if (hasLocalData) return List.of(dataKey()); + return List.of(); + } + + @Override + public int estimateSize() { + return 1000; + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java index 81e53222..8ff36c18 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -27,22 +27,26 @@ public class RemoteTransaction { return curTx.getId(); } - private Optional> tryDownloadRemote(RemoteObject obj) { - MutableObject> success = new MutableObject<>(null); + private Optional> tryDownloadRemote(RemoteObjectMeta obj) { + MutableObject> success = new 
MutableObject<>(null); try { remoteObjectServiceClient.getObject(obj.key(), rcv -> { - if (!obj.meta().knownType().isInstance(rcv.getRight().data())) - throw new IllegalStateException("Object type mismatch: " + obj.meta().knownType() + " vs " + rcv.getRight().data().getClass()); + if (!obj.knownType().isInstance(rcv.getRight().data())) + throw new IllegalStateException("Object type mismatch: " + obj.knownType() + " vs " + rcv.getRight().data().getClass()); - if (!rcv.getRight().changelog().equals(obj.meta().changelog())) { - var updated = syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), obj, rcv.getRight().changelog()); - if (!rcv.getRight().changelog().equals(updated.meta().changelog())) - throw new IllegalStateException("Changelog mismatch, update failed?: " + rcv.getRight().changelog() + " vs " + updated.meta().changelog()); - success.setValue(updated.withData((T) rcv.getRight().data())); - } else { - success.setValue(obj.withData((T) rcv.getRight().data())); - } + syncHandler.handleRemoteUpdate(rcv.getLeft(), obj.key(), rcv.getRight().changelog(), rcv.getRight().data()); + + var now = curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(obj.key())).orElse(null); + assert now != null; + + if (!now.hasLocalData()) + return false; + + var gotData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(obj.key())).orElse(null); + assert gotData != null; + + success.setValue(gotData); return true; }); } catch (Exception e) { @@ -50,60 +54,51 @@ public class RemoteTransaction { return Optional.empty(); } - curTx.put(success.getValue()); return Optional.of(success.getValue()); } @SuppressWarnings("unchecked") - public Optional> get(Class type, JObjectKey key, LockingStrategy strategy) { - return curTx.get(RemoteObject.class, key, strategy) + private Optional getData(Class type, JObjectKey key, LockingStrategy strategy, boolean tryRequest) { + return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy) .flatMap(obj -> { - if (obj.data() != null && !type.isInstance(obj.data())) - throw new IllegalStateException("Object (real) type mismatch: " + obj.data().getClass() + " vs " + type); -// FIXME: -// if (!type.isAssignableFrom(obj.meta().knownType())) -// throw new IllegalStateException("Object (meta) type mismatch: " + obj.meta().knownType() + " vs " + type); - - if (obj.data() != null) - return Optional.of(obj); - else - return tryDownloadRemote(obj); + if (obj.hasLocalData()) { + var realData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(key), strategy).orElse(null); + if (realData == null) + throw new IllegalStateException("Local data not found for " + key); // TODO: Race + if (!type.isInstance(realData.data())) + throw new IllegalStateException("Object type mismatch: " + realData.data().getClass() + " vs " + type); + return Optional.of((T) realData.data()); + } + if (!tryRequest) + return Optional.empty(); + return tryDownloadRemote(obj).map(wrapper -> (T) wrapper.data()); }); } public Optional getMeta(JObjectKey key, LockingStrategy strategy) { - return curTx.get(RemoteObject.class, key, strategy).map(obj -> obj.meta()); + return curTx.get(RemoteObjectMeta.class, RemoteObjectMeta.ofMetaKey(key), strategy); } - public Optional getData(Class type, JObjectKey key, LockingStrategy strategy) { - return get(type, key, strategy).map(RemoteObject::data); - } + public void putData(T obj) { + var curMeta = getMeta(obj.key()).orElse(null); - public void put(RemoteObject obj) { - curTx.put(obj); - } - - public void put(T obj) 
{ - var cur = get((Class) obj.getClass(), obj.key()).orElse(null); - - if (cur == null) { - curTx.put(new RemoteObject<>(obj, persistentPeerDataService.getSelfUuid())); + if (curMeta == null) { + curTx.put(new RemoteObjectMeta(obj, persistentPeerDataService.getSelfUuid())); + curTx.put(new RemoteObjectDataWrapper<>(obj)); return; } - if (cur.data() != null && cur.data().equals(obj)) - return; - if (cur.data() != null && !cur.data().getClass().equals(obj.getClass())) - throw new IllegalStateException("Object type mismatch: " + cur.data().getClass() + " vs " + obj.getClass()); - var newMeta = cur.meta(); +// if (cur.data() != null && cur.data().equals(obj)) +// return; + if (!curMeta.knownType().isAssignableFrom(obj.getClass())) + throw new IllegalStateException("Object type mismatch: " + curMeta.knownType() + " vs " + obj.getClass()); + var newMeta = curMeta; newMeta = newMeta.withChangelog(newMeta.changelog().plus(persistentPeerDataService.getSelfUuid(), newMeta.changelog().get(persistentPeerDataService.getSelfUuid()) + 1)); - var newObj = cur.withData(obj).withMeta(newMeta); - curTx.put(newObj); - } - - public Optional> get(Class type, JObjectKey key) { - return get(type, key, LockingStrategy.OPTIMISTIC); + curTx.put(newMeta); + var newData = curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(obj.key())) + .map(w -> w.withData(obj)).orElse(new RemoteObjectDataWrapper<>(obj)); + curTx.put(newData); } public Optional getMeta(JObjectKey key) { @@ -111,6 +106,18 @@ public class RemoteTransaction { } public Optional getData(Class type, JObjectKey key) { - return getData(type, key, LockingStrategy.OPTIMISTIC); + return getData(type, key, LockingStrategy.OPTIMISTIC, true); + } + + public Optional getDataLocal(Class type, JObjectKey key) { + return getData(type, key, LockingStrategy.OPTIMISTIC, false); + } + + public Optional getData(Class type, JObjectKey key, LockingStrategy strategy) { + return getData(type, key, strategy, true); + } + + public Optional getDataLocal(Class type, JObjectKey key, LockingStrategy strategy) { + return getData(type, key, strategy, false); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index b4cb82c6..ace16a81 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -15,6 +15,7 @@ import io.smallrye.common.annotation.Blocking; import io.smallrye.mutiny.Uni; import jakarta.annotation.security.RolesAllowed; import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; // Note: RunOnVirtualThread hangs somehow @GrpcService @@ -52,16 +53,32 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { public Uni getObject(GetObjectRequest request) { Log.info("<-- getObject: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); - var obj = txm.run(() -> { - var got = remoteTx.get(JDataRemote.class, JObjectKey.of(request.getName())).orElse(null); - if (got == null) { - Log.info("<-- getObject NOT FOUND: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); - throw new StatusRuntimeException(Status.NOT_FOUND); - } - return got; + + Pair got = txm.run(() -> { + var meta = remoteTx.getMeta(JObjectKey.of(request.getName())).orElse(null); + var 
obj = remoteTx.getDataLocal(JDataRemote.class, JObjectKey.of(request.getName())).orElse(null); + if (meta != null && !meta.seen()) + curTx.put(meta.withSeen(true)); + if (obj != null) + for (var ref : obj.collectRefsTo()) { + var refMeta = remoteTx.getMeta(ref).orElse(null); + if (refMeta != null && !refMeta.seen()) + curTx.put(refMeta.withSeen(true)); + } + return Pair.of(meta, obj); }); - var serialized = receivedObjectProtoSerializer.serialize(obj.toReceivedObject()); + if ((got.getValue() != null) && (got.getKey() == null)) { + Log.error("Inconsistent state for object meta: " + request.getName()); + throw new StatusRuntimeException(Status.INTERNAL); + } + + if (got.getValue() == null) { + Log.info("<-- getObject NOT FOUND: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); + throw new StatusRuntimeException(Status.NOT_FOUND); + } + + var serialized = receivedObjectProtoSerializer.serialize(new ReceivedObject(got.getKey().key(), got.getKey().changelog(), got.getValue())); return Uni.createFrom().item(serialized); // // Does @Blocking break this? // return Uni.createFrom().emitter(emitter -> { @@ -110,7 +127,7 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { builder.setObjName(request.getName()); txm.run(() -> { - var obj = curTx.get(RemoteObject.class, JObjectKey.of(request.getName())).orElse(null); + var obj = curTx.get(RemoteObjectMeta.class, JObjectKey.of(request.getName())).orElse(null); if (obj == null) { builder.setDeletionCandidate(true); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index d8f9516f..aab38104 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -1,17 +1,14 @@ package com.usatiuk.dhfs.objects.repository; -import com.usatiuk.dhfs.objects.JDataRemote; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.RemoteObject; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import org.pcollections.HashTreePMap; import org.pcollections.PMap; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import javax.annotation.Nullable; // @@ -92,149 +89,163 @@ public class SyncHandler { // } // - public RemoteObject handleOneUpdate(PeerId from, RemoteObject current, PMap rcvChangelog) { -// if (!rcv.key().equals(current.key())) { -// Log.error("Received update for different object: " + rcv.key() + " from " + from); -// throw new IllegalArgumentException("Received update for different object: " + rcv.key() + " from " + from); +// public RemoteObjectMeta handleOneUpdate(PeerId from, RemoteObjectMeta current, PMap rcvChangelog) { +//// if (!rcv.key().equals(current.key())) { +//// Log.error("Received update for different object: " + rcv.key() + " from " + from); +//// throw new IllegalArgumentException("Received update for different object: " + rcv.key() + " from " + from); +//// } +// +// var receivedTotalVer = rcvChangelog.values().stream().mapToLong(Long::longValue).sum(); +// +// if (current.meta().knownRemoteVersions().getOrDefault(from, 0L) > receivedTotalVer) { +// Log.error("Received older index update than 
was known for host: " + from + " " + current.key()); +// throw new IllegalStateException(); // FIXME: OutdatedUpdateException // } - - var receivedTotalVer = rcvChangelog.values().stream().mapToLong(Long::longValue).sum(); - - if (current.meta().knownRemoteVersions().getOrDefault(from, 0L) > receivedTotalVer) { - Log.error("Received older index update than was known for host: " + from + " " + current.key()); - throw new IllegalStateException(); // FIXME: OutdatedUpdateException - } - - Log.trace("Handling update: " + current.key() + " from " + from + "\n" + "ours: " + current + " \n" + "received: " + rcvChangelog); - - boolean conflict = false; - boolean updatedRemoteVersion = false; - - var newObj = current; - var curKnownRemoteVersion = current.meta().knownRemoteVersions().get(from); - - if (curKnownRemoteVersion == null || !curKnownRemoteVersion.equals(receivedTotalVer)) - updatedRemoteVersion = true; - - if (updatedRemoteVersion) - newObj = current.withMeta(current.meta().withKnownRemoteVersions( - current.meta().knownRemoteVersions().plus(from, receivedTotalVer) - )); - - - boolean hasLower = false; - boolean hasHigher = false; - for (var e : Stream.concat(current.meta().changelog().keySet().stream(), rcvChangelog.keySet().stream()).collect(Collectors.toUnmodifiableSet())) { - if (rcvChangelog.getOrDefault(e, 0L) < current.meta().changelog().getOrDefault(e, 0L)) - hasLower = true; - if (rcvChangelog.getOrDefault(e, 0L) > current.meta().changelog().getOrDefault(e, 0L)) - hasHigher = true; - } - - if (hasLower && hasHigher) { - Log.info("Conflict on update (inconsistent version): " + current.key() + " from " + from); -// Log. // -// info("Trying conflict resolution: " + header.getName() + " from " + from); -// var found = foundExt.get(); +// Log.trace("Handling update: " + current.key() + " from " + from + "\n" + "ours: " + current + " \n" + "received: " + rcvChangelog); // -// JObjectData theirsData; -// ObjectHeader theirsHeader; -// if (header. hasPushedData()) { -// theirsHeader = header; -// theirsData = dataProtoSerializer. +// boolean conflict = false; +// boolean updatedRemoteVersion = false; // -// deserialize(header.getPushedData()); -// } else { -// var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); -// theirsData = dataProtoSerializer. +// var newObj = current; +// var curKnownRemoteVersion = current.meta().knownRemoteVersions().get(from); // -// deserialize(got.getRight()); -// theirsHeader = got. +// if (curKnownRemoteVersion == null || !curKnownRemoteVersion.equals(receivedTotalVer)) +// updatedRemoteVersion = true; // -// getLeft(); -// } +// if (updatedRemoteVersion) +// newObj = current.withMeta(current.meta().withKnownRemoteVersions( +// current.meta().knownRemoteVersions().plus(from, receivedTotalVer) +// )); // -// jObjectTxManager. // -// executeTx(() -> { -// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { -// if (d == null) -// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); -// return d.getConflictResolver(); -// }); -// var resolver = conflictResolvers.select(resolverClass); -// resolver. -// -// get(). -// -// resolve(from, theirsHeader, theirsData, found); -// }); -// Log. 
info("Resolved conflict for " + from + " " + header.getName()); -// throw new NotImplementedException(); - } else if (hasLower) { - Log.info("Received older index update than known: " + from + " " + current.key()); -// throw new OutdatedUpdateException(); -// throw new NotImplementedException(); - } else if (hasHigher) { - var newChangelog = rcvChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? - rcvChangelog : rcvChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); - - newObj = newObj.withData(null).withMeta(newObj.meta().withChangelog(newChangelog)); -// if (header.hasPushedData()) -// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); - } -// else if (data == null && header.hasPushedData()) { -// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -// if (found.getData() == null) -// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); +// boolean hasLower = false; +// boolean hasHigher = false; +// for (var e : Stream.concat(current.meta().changelog().keySet().stream(), rcvChangelog.keySet().stream()).collect(Collectors.toUnmodifiableSet())) { +// if (rcvChangelog.getOrDefault(e, 0L) < current.meta().changelog().getOrDefault(e, 0L)) +// hasLower = true; +// if (rcvChangelog.getOrDefault(e, 0L) > current.meta().changelog().getOrDefault(e, 0L)) +// hasHigher = true; // } +// +// if (hasLower && hasHigher) { +// Log.info("Conflict on update (inconsistent version): " + current.key() + " from " + from); +//// Log. +//// +//// info("Trying conflict resolution: " + header.getName() + " from " + from); +//// var found = foundExt.get(); +//// +//// JObjectData theirsData; +//// ObjectHeader theirsHeader; +//// if (header. hasPushedData()) { +//// theirsHeader = header; +//// theirsData = dataProtoSerializer. +//// +//// deserialize(header.getPushedData()); +//// } else { +//// var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); +//// theirsData = dataProtoSerializer. +//// +//// deserialize(got.getRight()); +//// theirsHeader = got. +//// +//// getLeft(); +//// } +//// +//// jObjectTxManager. +//// +//// executeTx(() -> { +//// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { +//// if (d == null) +//// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); +//// return d.getConflictResolver(); +//// }); +//// var resolver = conflictResolvers.select(resolverClass); +//// resolver. +//// +//// get(). +//// +//// resolve(from, theirsHeader, theirsData, found); +//// }); +//// Log. info("Resolved conflict for " + from + " " + header.getName()); +//// throw new NotImplementedException(); +// } else if (hasLower) { +// Log.info("Received older index update than known: " + from + " " + current.key()); +//// throw new OutdatedUpdateException(); +//// throw new NotImplementedException(); +// } else if (hasHigher) { +// var newChangelog = rcvChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
+// rcvChangelog : rcvChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); +// +// newObj = newObj.withData(null).withMeta(newObj.meta().withChangelog(newChangelog)); +//// if (header.hasPushedData()) +//// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); +// } +//// else if (data == null && header.hasPushedData()) { +//// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); +//// if (found.getData() == null) +//// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); +//// } +// -// assert Objects.equals(receivedTotalVer, md.getOurVersion()); + /// / assert Objects.equals(receivedTotalVer, md.getOurVersion()); +// +// if (!updatedRemoteVersion) +// Log.debug("No action on update: " + current.meta().key() + " from " + from); +// +// return newObj; +// } + public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap receivedChangelog, @Nullable JDataRemote receivedData) { + var current = curTx.get(RemoteObjectMeta.class, key).orElse(null); + if (current == null) { + current = new RemoteObjectMeta(key, HashTreePMap.empty()); + curTx.put(current); + } - if (!updatedRemoteVersion) - Log.debug("No action on update: " + current.meta().key() + " from " + from); + var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog); - return newObj; - } - - public RemoteObject handleRemoteUpdate(PeerId from, JObjectKey key, RemoteObject current, PMap rcv) { - // TODO: Dedup - try { - if (current == null) { - var obj = new RemoteObject<>(key, rcv); - curTx.put(obj); - current = (RemoteObject) obj; // Will update known remote version too + switch (changelogCompare) { + case EQUAL -> { + Log.debug("No action on update: " + key + " from " + from); + if (!current.hasLocalData() && receivedData != null) { + current = current.withHaveLocal(true); + curTx.put(current); + curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key())) + .map(w -> w.withData(receivedData)).orElse(new RemoteObjectDataWrapper<>(receivedData))); + } } + case NEWER -> { + Log.debug("Received newer index update than known: " + key + " from " + from); + var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
+ receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); + current = current.withChangelog(newChangelog); - var newObj = handleOneUpdate(from, current, rcv); - if (newObj != current) { - curTx.put(newObj); + if (receivedData != null) { + current = current.withHaveLocal(true); + curTx.put(current); + curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key())) + .map(w -> w.withData(receivedData)).orElse(new RemoteObjectDataWrapper<>(receivedData))); + } else { + current = current.withHaveLocal(false); + curTx.put(current); + } + } + case OLDER -> { + Log.debug("Received older index update than known: " + key + " from " + from); + return; + } + case CONFLICT -> { + Log.debug("Conflict on update (inconsistent version): " + key + " from " + from); + // TODO: + return; } - return newObj; -// } catch (OutdatedUpdateException ignored) { -// Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid()); -// invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName()); - } catch (Exception ex) { - Log.info("Error when handling update from " + from + " of " + current.meta().key(), ex); - throw ex; } + var curKnownRemoteVersion = current.knownRemoteVersions().get(from); + var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum(); -// return IndexUpdateReply.getDefaultInstance(); - } - - protected static class OutdatedUpdateException extends RuntimeException { - OutdatedUpdateException() { - super(); - } - - OutdatedUpdateException(String message) { - super(message); - } - - @Override - public synchronized Throwable fillInStackTrace() { - return this; + if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) { + current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer)); + curTx.put(current); } } } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java new file mode 100644 index 00000000..ceb391e2 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHelper.java @@ -0,0 +1,42 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.dhfs.objects.PeerId; +import org.pcollections.PMap; + +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class SyncHelper { + public enum ChangelogCmpResult { + EQUAL, + NEWER, + OLDER, + CONFLICT + } + + public static ChangelogCmpResult compareChangelogs(PMap current, PMap other) { + boolean hasLower = false; + boolean hasHigher = false; + for (var e : Stream.concat(current.keySet().stream(), other.keySet().stream()).collect(Collectors.toUnmodifiableSet())) { + if (other.getOrDefault(e, 0L) < current.getOrDefault(e, 0L)) + hasLower = true; + if (other.getOrDefault(e, 0L) > current.getOrDefault(e, 0L)) + hasHigher = true; + } + + if (hasLower && hasHigher) + return ChangelogCmpResult.CONFLICT; + + if (hasLower) + return ChangelogCmpResult.OLDER; + + if (hasHigher) + return ChangelogCmpResult.NEWER; + + return ChangelogCmpResult.EQUAL; + } + +// public static PMap mergeChangelogs(PMap current, PMap other) { +// return current.plusAll(other); +// } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java index e2162139..18ed10b9 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java @@ -2,11 +2,11 @@ package com.usatiuk.dhfs.objects.repository.invalidation; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.RemoteObject; +import com.usatiuk.dhfs.objects.RemoteObjectMeta; import org.pcollections.PMap; public record IndexUpdateOp(JObjectKey key, PMap changelog) implements Op { - public IndexUpdateOp(RemoteObject object) { - this(object.key(), object.meta().changelog()); + public IndexUpdateOp(RemoteObjectMeta object) { + this(object.key(), object.changelog()); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java index acf06393..d8d5b336 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -27,8 +27,8 @@ public class OpPusher { Op info = txm.run(() -> { var obj = curTx.get(JData.class, key).orElse(null); switch (obj) { - case RemoteObject remote -> { - return new IndexUpdateOp(key, remote.meta().changelog()); + case RemoteObjectMeta remote -> { + return new IndexUpdateOp(key, remote.changelog()); } case JKleppmannTreePersistentData pd -> { var maybeQueue = pd.queues().get(op); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java index 8ee79b77..b7c4c48d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/PushOpHandler.java @@ -1,6 +1,5 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.RemoteTransaction; import com.usatiuk.dhfs.objects.repository.SyncHandler; @@ -18,8 +17,6 @@ public class PushOpHandler { RemoteTransaction remoteTransaction; public void handlePush(PeerId peer, IndexUpdateOp obj) { - syncHandler.handleRemoteUpdate(peer, obj.key(), - remoteTransaction.get(JDataRemote.class, obj.key()).orElse(null), - obj.changelog()); + syncHandler.handleRemoteUpdate(peer, obj.key(), obj.changelog(), null); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java index be8d8a2a..5398302f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java @@ -72,7 +72,7 @@ public class PeerInfoService { jObjectTxManager.run(() -> { var parent = getTree().traverse(List.of()); var newPeerInfo = new PeerInfo(id, cert); - remoteTx.put(newPeerInfo); + remoteTx.putData(newPeerInfo); getTree().move(parent, new 
JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), getTree().getNewNodeId()); }); } diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java index 5648e03e..a45d0b9a 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java @@ -38,6 +38,7 @@ public class DataLocker { private static class LockTag { final Thread owner = Thread.currentThread(); + // final StackTraceElement[] _creationStack = Thread.currentThread().getStackTrace(); boolean released = false; } From 00ee6f31356a422f4c514ddd25a1fde18af09f03 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 09:23:55 +0100 Subject: [PATCH 061/105] some deletion fixes --- .../com/usatiuk/dhfs/objects/RefcounterTxHook.java | 8 +++++++- .../com/usatiuk/dhfs/objects/RemoteObjectDeleter.java | 10 +++++++--- .../objects/repository/RemoteObjectServiceClient.java | 6 +++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index 7cb22447..8978c728 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -17,7 +18,8 @@ public class RefcounterTxHook implements PreCommitTxHook { return found; } - if (cur instanceof RemoteObjectMeta || cur instanceof JKleppmannTreeNode) { + if (cur instanceof RemoteObjectDataWrapper || cur instanceof JKleppmannTreeNode) { + // FIXME: return new RemoteObjectMeta(key); } else { return found; @@ -39,6 +41,7 @@ public class RefcounterTxHook implements PreCommitTxHook { if (!oldRefs.contains(curRef)) { var referenced = getRef(refCur, curRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, curRef); } } @@ -46,6 +49,7 @@ public class RefcounterTxHook implements PreCommitTxHook { if (!curRefs.contains(oldRef)) { var referenced = getRef(refCur, oldRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, oldRef); } } } @@ -59,6 +63,7 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var newRef : refCur.collectRefsTo()) { var referenced = getRef(refCur, newRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, newRef); } } @@ -71,6 +76,7 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var removedRef : refCur.collectRefsTo()) { var referenced = getRef(refCur, removedRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, removedRef); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java index 9fe55a39..7c7730f7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectDeleter.java @@ -126,23 +126,27 @@ public class RemoteObjectDeleter { } var knownHosts = peerInfoService.getPeersNoSelf(); + RemoteObjectMeta finalTarget = target; List missing = knownHosts.stream() .map(PeerInfo::id) - .filter(id -> !target.confirmedDeletes().contains(id)).toList(); + .filter(id -> !finalTarget.confirmedDeletes().contains(id)).toList(); var ret = remoteObjectServiceClient.canDelete(missing, objName, target.refsFrom()); long ok = 0; for (var r : ret) { - if (!r.getDeletionCandidate()) { + if (!r.getValue().getDeletionCandidate()) { // for (var rr : r.getReferrersList()) // autoSyncProcessor.add(rr); } else { + target = target.withConfirmedDeletes(target.confirmedDeletes().plus(r.getKey())); ok++; } } + curTx.put(target); + if (ok != missing.size()) { Log.debugv("Delaying deletion check of {0}", objName); return true; @@ -175,7 +179,7 @@ public class RemoteObjectDeleter { if (!obj.seen()) return true; - var knownHosts = peerInfoService.getPeers(); + var knownHosts = peerInfoService.getPeersNoSelf(); boolean missing = false; for (var x : knownHosts) { if (!obj.confirmedDeletes().contains(x.id())) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java index 1d9606c0..fc819e0f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -159,15 +159,15 @@ public class RemoteObjectServiceClient { return OpPushReply.getDefaultInstance(); } - public Collection canDelete(Collection targets, JObjectKey object, Collection ourReferrers) { + public Collection> canDelete(Collection targets, JObjectKey object, Collection ourReferrers) { Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(PeerId::toString).collect(Collectors.joining(", "))); try { - return _batchExecutor.invokeAll(targets.stream().>map(h -> () -> { + return _batchExecutor.invokeAll(targets.stream().>>map(h -> () -> { var req = CanDeleteRequest.newBuilder().setName(object.toString()); for (var ref : ourReferrers) { req.addOurReferrers(ref.toString()); } - return rpcClientFactory.withObjSyncClient(h, (p, client) -> client.canDelete(req.build())); + return Pair.of(h, rpcClientFactory.withObjSyncClient(h, (p, client) -> client.canDelete(req.build()))); }).toList()).stream().map(f -> { try { return f.get(); From bcd55835ca53853bffce387a964e3bfd67ac4d6b Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 11:03:30 +0100 Subject: [PATCH 062/105] fix stuff not being deleted --- .../dhfs/objects/CurrentTransaction.java | 10 +++++++ .../usatiuk/dhfs/objects/JObjectManager.java | 24 ++++++++++++++-- .../dhfs/objects/TransactionManager.java | 3 +- .../dhfs/objects/TransactionManagerImpl.java | 5 ++-- .../com/usatiuk/dhfs/objects/TxWriteback.java | 2 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 16 +++++------ .../WritebackObjectPersistentStore.java | 7 ++++- .../dhfs/objects/transaction/Transaction.java | 4 ++- .../transaction/TransactionFactoryImpl.java | 28 ++++++++++++++++--- .../transaction/TransactionHandle.java | 7 +++++ .../transaction/TransactionHandlePrivate.java | 7 +++++ .../transaction/TransactionPrivate.java | 4 ++- .../usatiuk/dhfs/objects/DeleterTxHook.java | 4 +-- 13 files changed, 98 
insertions(+), 23 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index 52b97a5a..089c97ec 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -17,6 +17,16 @@ public class CurrentTransaction implements Transaction { return transactionManager.current().getId(); } + @Override + public void onCommit(Runnable runnable) { + transactionManager.current().onCommit(runnable); + } + + @Override + public void onFlush(Runnable runnable) { + transactionManager.current().onFlush(runnable); + } + @Override public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { return transactionManager.current().get(type, key, strategy); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index eb4c466d..3210a937 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -110,7 +110,7 @@ public class JObjectManager { return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); } - public void commit(TransactionPrivate tx) { + public TransactionHandle commit(TransactionPrivate tx) { verifyReady(); Log.trace("Committing transaction " + tx.getId()); // FIXME: Better way? 
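The CurrentTransaction and JObjectManager hunks in this patch split transaction completion into two phases: onCommit callbacks run as soon as the transaction has committed in memory, while onFlush callbacks are deferred until the writeback layer reports the bundle durable. The sketch below illustrates that contract; only the onCommit/onFlush split and the Consumer<Runnable> returned by WritebackObjectPersistentStore.commitTx are from the patch, the class and method bodies are illustrative.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    // Illustrative sketch of the commit/flush callback split; not part of the patch.
    class TxCallbackSketch {
        private final List<Runnable> onCommit = new ArrayList<>();
        private final List<Runnable> onFlush = new ArrayList<>();

        void onCommit(Runnable r) { onCommit.add(r); } // runs right after the in-memory commit
        void onFlush(Runnable r) { onFlush.add(r); }   // runs once the write is durable

        // addFlushCallback stands in for the Consumer<Runnable> that commitTx
        // returns: it invokes the callback when the bundle is persisted, or
        // immediately if it already was.
        void commit(Consumer<Runnable> addFlushCallback) {
            onCommit.forEach(Runnable::run);
            onFlush.forEach(addFlushCallback::accept);
        }
    }

DeleterTxHook at the end of this patch uses the onCommit hook for exactly this reason: deletion candidates are queued only once the deciding transaction has actually committed.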
@@ -243,7 +243,27 @@ public class JObjectManager { } Log.tracef("Committing transaction %d to storage", tx.getId()); - writebackObjectPersistentStore.commitTx(current.values(), tx.getId()); + var addFlushCallback = writebackObjectPersistentStore.commitTx(current.values(), tx.getId()); + + for (var callback : tx.getOnCommit()) { + callback.run(); + } + + for (var callback : tx.getOnFlush()) { + addFlushCallback.accept(callback); + } + + return new TransactionHandle() { + @Override + public long getId() { + return tx.getId(); + } + + @Override + public void onFlush(Runnable runnable) { + addFlushCallback.accept(runnable); + } + }; } catch (Throwable t) { Log.trace("Error when committing transaction", t); throw new TxCommitException(t.getMessage(), t); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index ffff3751..754858f0 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.objects.transaction.TransactionHandle; import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; @@ -9,7 +10,7 @@ import java.util.function.Supplier; public interface TransactionManager { void begin(); - void commit(); + TransactionHandle commit(); void rollback(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index d8bbf6a4..2f3c212d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.Transaction; +import com.usatiuk.dhfs.objects.transaction.TransactionHandle; import com.usatiuk.dhfs.objects.transaction.TransactionPrivate; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; @@ -24,14 +25,14 @@ public class TransactionManagerImpl implements TransactionManager { } @Override - public void commit() { + public TransactionHandle commit() { if (_currentTransaction.get() == null) { throw new IllegalStateException("No transaction started"); } Log.trace("Committing transaction"); try { - jObjectManager.commit(_currentTransaction.get()); + return jObjectManager.commit(_currentTransaction.get()); } catch (Throwable e) { Log.trace("Transaction commit failed", e); throw e; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 66138d60..6d73de04 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -17,7 +17,7 @@ public interface TxWriteback { // Executes callback after bundle with bundleId id has been persisted // if it was already, runs callback on the caller thread - void asyncFence(long bundleId, VoidFn callback); + void asyncFence(long bundleId, Runnable callback); interface PendingWriteEntry { long bundleId(); diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 66ad87b4..2b7ba3c8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -145,14 +145,14 @@ public class TxWritebackImpl implements TxWriteback { }); } - List> callbacks = new ArrayList<>(); + List> callbacks = new ArrayList<>(); synchronized (_notFlushedBundles) { _lastWrittenTx.set(bundle.getId()); while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); } } - callbacks.forEach(l -> l.forEach(VoidFn::apply)); + callbacks.forEach(l -> l.forEach(Runnable::run)); synchronized (_flushWaitSynchronizer) { currentSize -= bundle.calculateTotalSize(); @@ -278,16 +278,16 @@ public class TxWritebackImpl implements TxWriteback { } @Override - public void asyncFence(long bundleId, VoidFn fn) { + public void asyncFence(long bundleId, Runnable fn) { verifyReady(); if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); + fn.run(); return; } synchronized (_notFlushedBundles) { if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); + fn.run(); return; } _notFlushedBundles.get(bundleId).addCallback(fn); @@ -296,7 +296,7 @@ public class TxWritebackImpl implements TxWriteback { private class TxBundleImpl implements TxBundle { private final LinkedHashMap _entries = new LinkedHashMap<>(); - private final ArrayList _callbacks = new ArrayList<>(); + private final ArrayList _callbacks = new ArrayList<>(); private long _txId; private volatile boolean _ready = false; private long _size = -1; @@ -315,14 +315,14 @@ public class TxWritebackImpl implements TxWriteback { _ready = true; } - public void addCallback(VoidFn callback) { + public void addCallback(Runnable callback) { synchronized (_callbacks) { if (_wasCommitted) throw new IllegalStateException(); _callbacks.add(callback); } } - public List setCommitted() { + public List setCommitted() { synchronized (_callbacks) { _wasCommitted = true; return Collections.unmodifiableList(_callbacks); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index c40164a9..e126aff6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -9,6 +9,7 @@ import jakarta.inject.Inject; import javax.annotation.Nonnull; import java.util.Collection; import java.util.Optional; +import java.util.function.Consumer; @ApplicationScoped public class WritebackObjectPersistentStore { @@ -33,7 +34,7 @@ public class WritebackObjectPersistentStore { }; } - void commitTx(Collection> writes, long id) { + Consumer commitTx(Collection> writes, long id) { var bundle = txWriteback.createBundle(); try { for (var action : writes) { @@ -58,5 +59,9 @@ public class WritebackObjectPersistentStore { Log.tracef("Committing transaction %d to storage", id); txWriteback.commitBundle(bundle); + + long bundleId = bundle.getId(); + + return r -> txWriteback.asyncFence(bundleId, r); } } diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 198c8f30..166aceb3 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -6,9 +6,11 @@ import com.usatiuk.dhfs.objects.JObjectKey; import java.util.Optional; // The transaction interface actually used by user code to retrieve objects -public interface Transaction { +public interface Transaction extends TransactionHandle { long getId(); + void onCommit(Runnable runnable); + Optional get(Class type, JObjectKey key, LockingStrategy strategy); void put(JData obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index f7357822..da9da306 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -5,10 +5,7 @@ import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JObjectKey; import jakarta.enterprise.context.ApplicationScoped; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; +import java.util.*; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { @@ -22,6 +19,9 @@ public class TransactionFactoryImpl implements TransactionFactory { private final ReadTrackingObjectSource _source; private final Map> _writes = new HashMap<>(); private Map> _newWrites = new HashMap<>(); + private final List _onCommit = new ArrayList<>(); + private final List _onFlush = new ArrayList<>(); + private TransactionImpl(long id, TransactionObjectSource source) { _id = id; _source = new ReadTrackingObjectSource(source); @@ -31,6 +31,26 @@ public class TransactionFactoryImpl implements TransactionFactory { return _id; } + @Override + public void onCommit(Runnable runnable) { + _onCommit.add(runnable); + } + + @Override + public void onFlush(Runnable runnable) { + _onFlush.add(runnable); + } + + @Override + public Collection getOnCommit() { + return Collections.unmodifiableCollection(_onCommit); + } + + @Override + public Collection getOnFlush() { + return Collections.unmodifiableCollection(_onFlush); + } + @Override public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { switch (_writes.get(key)) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java new file mode 100644 index 00000000..d55ee1ea --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.transaction; + +public interface TransactionHandle { + long getId(); + + void onFlush(Runnable runnable); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java new file mode 100644 index 00000000..cc9c8e7d --- /dev/null +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandlePrivate.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.transaction; + +import java.util.Collection; + +public interface TransactionHandlePrivate extends TransactionHandle { + Collection getOnFlush(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index e7ca7d05..1de3b1d8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -6,10 +6,12 @@ import java.util.Collection; import java.util.Map; // The transaction interface actually used by user code to retrieve objects -public interface TransactionPrivate extends Transaction { +public interface TransactionPrivate extends Transaction, TransactionHandlePrivate { Collection> drainNewWrites(); Map> reads(); ReadTrackingObjectSource readSource(); + + Collection getOnCommit(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java index 8917ef6c..6910cb1a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/DeleterTxHook.java @@ -23,7 +23,7 @@ public class DeleterTxHook implements PreCommitTxHook { } if (canDelete(refCur)) { if (refCur instanceof RemoteObjectMeta ro) { - remoteObjectDeleter.putDeletionCandidate(ro); + curTx.onCommit(() -> remoteObjectDeleter.putDeletionCandidate(ro)); return; } Log.trace("Deleting object on change: " + key); @@ -39,7 +39,7 @@ public class DeleterTxHook implements PreCommitTxHook { if (canDelete(refCur)) { if (refCur instanceof RemoteObjectMeta ro) { - remoteObjectDeleter.putDeletionCandidate(ro); + curTx.onCommit(() -> remoteObjectDeleter.putDeletionCandidate(ro)); return; } Log.warn("Deleting object on creation: " + key); From 1f30af50df9b22e3a3bb23470092c419feec1b95 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 13:39:29 +0100 Subject: [PATCH 063/105] getEscapedRefs --- .../JKleppmannTreeOpWrapper.java | 17 ++++++++------ .../repository/RemoteObjectServiceClient.java | 17 ++++---------- .../invalidation/IndexUpdateOp.java | 8 +++++++ .../InvalidationQueueService.java | 23 ------------------- .../objects/repository/invalidation/Op.java | 4 ++++ 5 files changed, 27 insertions(+), 42 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java index 209c43df..1bcf5798 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java @@ -3,19 +3,22 @@ package com.usatiuk.dhfs.objects.jkleppmanntree; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; +import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; import com.usatiuk.dhfs.objects.repository.invalidation.Op; import com.usatiuk.kleppmanntree.OpMove; import 
java.io.Serializable; +import java.util.Collection; +import java.util.List; // Wrapper to avoid having to specify generic types public record JKleppmannTreeOpWrapper(JObjectKey treeName, OpMove op) implements Op, Serializable { -// @Override -// public Collection getEscapedRefs() { -// if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { -// return List.of(mf.getFileIno()); -// } -// return List.of(); -// } + @Override + public Collection getEscapedRefs() { + if (op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { + return List.of(mf.getFileIno()); + } + return List.of(); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java index fc819e0f..b9bdc292 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -139,19 +139,12 @@ public class RemoteObjectServiceClient { // } // public OpPushReply pushOps(PeerId target, List ops) { -// for (Op op : ops) { -// for (var ref : op.getEscapedRefs()) { -// jObjectTxManager.executeTx(() -> { -// jObjectManager.get(ref).ifPresent(JObject::markSeen); -// }); -// } -// } -// var builder = OpPushMsg.newBuilder() -// .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) -// .setQueueId(queueName); -// for (var op : ops) -// builder.addMsg(opProtoSerializer.serialize(op)); for (Op op : ops) { + txm.run(() -> { + for (var ref : op.getEscapedRefs()) { + curTx.get(RemoteObjectMeta.class, ref).map(m -> m.withSeen(true)).ifPresent(curTx::put); + } + }); var serialized = opProtoSerializer.serialize(op); var built = OpPushRequest.newBuilder().addMsg(serialized).build(); rpcClientFactory.withObjSyncClient(target, (tgt, client) -> client.opPush(built)); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java index 18ed10b9..8e9a6d4c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java @@ -5,8 +5,16 @@ import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.RemoteObjectMeta; import org.pcollections.PMap; +import java.util.Collection; +import java.util.List; + public record IndexUpdateOp(JObjectKey key, PMap changelog) implements Op { public IndexUpdateOp(RemoteObjectMeta object) { this(object.key(), object.changelog()); } + + @Override + public Collection getEscapedRefs() { + return List.of(key); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java index b2c3c024..07d7c594 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java @@ -3,12 +3,8 @@ package com.usatiuk.dhfs.objects.repository.invalidation; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import 
com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.repository.PeerManager; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.repository.peersync.PeerInfoService; -import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; @@ -34,14 +30,6 @@ public class InvalidationQueueService { @Inject PeerManager remoteHostManager; @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - @Inject - TransactionManager txm; - @Inject - Transaction curTx; - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject DeferredInvalidationQueueService deferredInvalidationQueueService; @Inject PeerInfoService peerInfoService; @@ -148,7 +136,6 @@ public class InvalidationQueueService { } public void pushInvalidationToAll(JObjectKey key) { -// if (obj.getMeta().isOnlyLocal()) return; while (true) { var queue = _toAllQueue.get(); if (queue == null) { @@ -164,7 +151,6 @@ public class InvalidationQueueService { } public void pushInvalidationToOne(PeerId host, JObjectKey obj) { -// if (obj.getMeta().isOnlyLocal()) return; if (remoteHostManager.isReachable(host)) _queue.add(Pair.of(host, obj)); else @@ -172,18 +158,9 @@ public class InvalidationQueueService { } public void pushInvalidationToOne(PeerId host, JData obj) { -// if (obj.getMeta().isOnlyLocal()) return; pushInvalidationToOne(host, obj.key()); } -// public void pushInvalidationToAll(String name) { -// pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); -// } -// -// public void pushInvalidationToOne(PeerId host, JObjectKey name) { -// pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); -// } - protected void pushDeferredInvalidations(PeerId host, JObjectKey name) { _queue.add(Pair.of(host, name)); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java index eb5c6029..c73b406c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java @@ -1,8 +1,12 @@ package com.usatiuk.dhfs.objects.repository.invalidation; import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.repository.OpPushPayload; +import java.util.Collection; + @ProtoMirror(OpPushPayload.class) public interface Op { + Collection getEscapedRefs(); } From f9ad540e2d6e7818bc805889f99b6740906ee388 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 15:38:48 +0100 Subject: [PATCH 064/105] pcollections in JKleppmannTree --- .../jkleppmanntree/JKleppmannTreeManager.java | 33 +++++++------------ .../structs/JKleppmannTreePersistentData.java | 8 ++--- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index cfcf5036..25ef3949 100644 --- 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -45,8 +45,8 @@ public class JKleppmannTreeManager { true, 1L, HashTreePMap.empty(), - new HashMap<>(), - new TreeMap<>() + HashTreePMap.empty(), + TreePMap.empty() ); curTx.put(data); var rootNode = new JKleppmannTreeNode(JObjectKey.of(name.name() + "_jt_root"), null, new JKleppmannTreeNodeMetaDirectory("")); @@ -330,9 +330,7 @@ public class JKleppmannTreeManager { @Override public void putForPeer(PeerId peerId, Long timestamp) { - var newPeerTimestampLog = new HashMap<>(_data.peerTimestampLog()); - newPeerTimestampLog.put(peerId, timestamp); - _data = _data.withPeerTimestampLog(newPeerTimestampLog); + _data = _data.withPeerTimestampLog(_data.peerTimestampLog().plus(peerId, timestamp)); curTx.put(_data); } } @@ -340,26 +338,23 @@ public class JKleppmannTreeManager { private class LogWrapper implements LogInterface { @Override public Pair, LogRecord> peekOldest() { - var ret = _data.log().firstEntry(); - if (ret == null) return null; - return Pair.of(ret); + if (_data.log().isEmpty()) return null; + return Pair.of(_data.log().firstEntry()); } @Override public Pair, LogRecord> takeOldest() { - var newLog = new TreeMap<>(_data.log()); - var ret = newLog.pollFirstEntry(); - _data = _data.withLog(newLog); + if (_data.log().isEmpty()) return null; + var ret = _data.log().firstEntry(); + _data = _data.withLog(_data.log().minusFirstEntry()); curTx.put(_data); - if (ret == null) return null; return Pair.of(ret); } @Override public Pair, LogRecord> peekNewest() { - var ret = _data.log().lastEntry(); - if (ret == null) return null; - return Pair.of(ret); + if (_data.log().isEmpty()) return null; + return Pair.of(_data.log().lastEntry()); } @Override @@ -391,17 +386,13 @@ public class JKleppmannTreeManager { public void put(CombinedTimestamp timestamp, LogRecord record) { if (_data.log().containsKey(timestamp)) throw new IllegalStateException("Overwriting log entry?"); - var newLog = new TreeMap<>(_data.log()); - newLog.put(timestamp, record); - _data = _data.withLog(newLog); + _data = _data.withLog(_data.log().plus(timestamp, record)); curTx.put(_data); } @Override public void replace(CombinedTimestamp timestamp, LogRecord record) { - var newLog = new TreeMap<>(_data.log()); - newLog.put(timestamp, record); - _data = _data.withLog(newLog); + _data = _data.withLog(_data.log().plus(timestamp, record)); curTx.put(_data); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java index 440b38de..f6576804 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java @@ -17,8 +17,8 @@ public record JKleppmannTreePersistentData( JObjectKey key, PCollection refsFrom, boolean frozen, long clock, PMap, OpMove>> queues, - HashMap peerTimestampLog, - TreeMap, LogRecord> log + PMap peerTimestampLog, + PSortedMap, LogRecord> log ) implements JDataRefcounted { @Override public JKleppmannTreePersistentData withRefsFrom(PCollection refs) { @@ -38,11 +38,11 @@ public record JKleppmannTreePersistentData( return new 
JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } - public JKleppmannTreePersistentData withPeerTimestampLog(HashMap peerTimestampLog) { + public JKleppmannTreePersistentData withPeerTimestampLog(PMap peerTimestampLog) { return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } - public JKleppmannTreePersistentData withLog(TreeMap, LogRecord> log) { + public JKleppmannTreePersistentData withLog(PSortedMap, LogRecord> log) { return new JKleppmannTreePersistentData(key, refsFrom, frozen, clock, queues, peerTimestampLog, log); } From de1970553111b586d14f7851faa91f5caebeb67b Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 18:02:22 +0100 Subject: [PATCH 065/105] some initial resync infrastructure --- .../dhfs/objects/repository/PeerManager.java | 31 +++---- .../repository/PersistentPeerDataService.java | 82 ++++++------------- .../repository/PersistentRemoteHostsData.java | 11 ++- .../dhfs/objects/repository/SyncHandler.java | 4 + .../repository/peersync/PeerInfoService.java | 16 ++++ .../repository/webapi/ManagementApi.java | 2 +- 6 files changed, 70 insertions(+), 76 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java index eabd1b8c..52741a53 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -49,6 +49,8 @@ public class PeerManager { long pingTimeout; @Inject PeerDiscoveryDirectory peerDiscoveryDirectory; + @Inject + SyncHandler syncHandler; private ExecutorService _heartbeatExecutor; // Note: keep priority updated with below @@ -101,13 +103,10 @@ public class PeerManager { private void handleConnectionSuccess(PeerInfo host, PeerAddress address) { boolean wasReachable = isReachable(host); -// boolean shouldSyncObj = persistentPeerDataService.markInitialObjSyncDone(host); -// boolean shouldSyncOp = persistentPeerDataService.markInitialOpSyncDone(host); -// -// if (shouldSyncObj) -// syncHandler.pushInitialResyncObj(host); -// if (shouldSyncOp) -// syncHandler.pushInitialResyncOp(host); + boolean shouldSync = persistentPeerDataService.markInitialSyncDone(host.id()); + + if (shouldSync) + syncHandler.doInitialSync(host.id()); _states.put(host.id(), address); @@ -179,14 +178,12 @@ public class PeerManager { }); } -// public void removeRemoteHost(UUID host) { -// persistentPeerDataService.removeHost(host); -// // Race? 
-// _transientPeersState.runWriteLocked(d -> { -// d.getStates().remove(host); -// return null; -// }); -// } + public void removeRemoteHost(PeerId peerId) { + transactionManager.run(() -> { + peerInfoService.removePeer(peerId); + persistentPeerDataService.resetInitialSyncDone(peerId); + }); + } private PeerAddress selectBestAddress(PeerId host) { return peerDiscoveryDirectory.getForPeer(host).stream().findFirst().orElseThrow(); @@ -207,9 +204,7 @@ public class PeerManager { peerInfoService.putPeer(host, cert); }); - peerTrustManager.reloadTrustManagerHosts( - transactionManager.run(() -> peerInfoService.getPeers().stream().toList()) - ); //FIXME: + peerTrustManager.reloadTrustManagerHosts(transactionManager.run(() -> peerInfoService.getPeers().stream().toList())); //FIXME: } public Collection getSeenButNotAddedHosts() { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 3f3f0eae..8d21c6ef 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -13,6 +13,7 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.HashTreePSet; import java.io.IOException; import java.security.KeyPair; @@ -38,6 +39,8 @@ public class PersistentPeerDataService { Transaction curTx; @Inject PeerInfoService peerInfoService; + @Inject + TransactionManager txm; @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid") Optional presetUuid; @@ -61,7 +64,7 @@ public class PersistentPeerDataService { _selfKeyPair = CertificateTools.generateKeyPair(); _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); - curTx.put(new PersistentRemoteHostsData(_selfUuid, 0, _selfCertificate, _selfKeyPair)); + curTx.put(new PersistentRemoteHostsData(_selfUuid, 0, _selfCertificate, _selfKeyPair, HashTreePSet.empty())); peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); } catch (CertificateEncodingException e) { throw new RuntimeException(e); @@ -122,59 +125,28 @@ public class PersistentPeerDataService { return _selfCertificate; } -// // Returns true if host's initial sync wasn't done before, and marks it as done -// public boolean markInitialOpSyncDone(UUID connectedHost) { -// return jObjectTxManager.executeTx(() -> { -// peerDirectoryLocal.get().rwLock(); -// try { -// peerDirectoryLocal.get().local(); -// boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost); -// -// if (!contained) -// peerDirectoryLocal.get().local().mutate(new JMutator() { -// @Override -// public boolean mutate(PeerDirectoryLocal object) { -// object.getInitialOpSyncDone().add(connectedHost); -// return true; -// } -// -// @Override -// public void revert(PeerDirectoryLocal object) { -// object.getInitialOpSyncDone().remove(connectedHost); -// } -// }); -// return !contained; -// } finally { -// peerDirectoryLocal.get().rwUnlock(); -// } -// }); -// } -// -// public boolean markInitialObjSyncDone(UUID connectedHost) { -// return jObjectTxManager.executeTx(() -> { -// peerDirectoryLocal.get().rwLock(); -// try { -// peerDirectoryLocal.get().local(); -// boolean contained = 
peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost); -// -// if (!contained) -// peerDirectoryLocal.get().local().mutate(new JMutator() { -// @Override -// public boolean mutate(PeerDirectoryLocal object) { -// object.getInitialObjSyncDone().add(connectedHost); -// return true; -// } -// -// @Override -// public void revert(PeerDirectoryLocal object) { -// object.getInitialObjSyncDone().remove(connectedHost); -// } -// }); -// return !contained; -// } finally { -// peerDirectoryLocal.get().rwUnlock(); -// } -// }); -// } + // Returns true if host's initial sync wasn't done before, and marks it as done + public boolean markInitialSyncDone(PeerId peerId) { + return txm.run(() -> { + var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (data == null) throw new IllegalStateException("Self data not found"); + boolean exists = data.initialSyncDone().contains(peerId); + if (exists) return false; + curTx.put(data.withInitialSyncDone(data.initialSyncDone().plus(peerId))); + return true; + }); + } + + // Returns true if it was marked as done before, and resets it + public boolean resetInitialSyncDone(PeerId peerId) { + return txm.run(() -> { + var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (data == null) throw new IllegalStateException("Self data not found"); + boolean exists = data.initialSyncDone().contains(peerId); + if (!exists) return false; + curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId))); + return true; + }); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java index 23a99e0e..56f3411f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -3,6 +3,8 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; +import org.pcollections.HashTreePSet; +import org.pcollections.PSet; import java.io.Serializable; import java.security.KeyPair; @@ -11,7 +13,8 @@ import java.security.cert.X509Certificate; public record PersistentRemoteHostsData(PeerId selfUuid, long selfCounter, X509Certificate selfCertificate, - KeyPair selfKeyPair) implements JData, Serializable { + KeyPair selfKeyPair, + PSet initialSyncDone) implements JData, Serializable { public static final JObjectKey KEY = JObjectKey.of("self_peer_data"); @Override @@ -20,6 +23,10 @@ public record PersistentRemoteHostsData(PeerId selfUuid, } public PersistentRemoteHostsData withSelfCounter(long selfCounter) { - return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair); + return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair, initialSyncDone); + } + + public PersistentRemoteHostsData withInitialSyncDone(PSet initialSyncDone) { + return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair, initialSyncDone); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index aab38104..ae91198f 100644 ---
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -248,4 +248,8 @@ public class SyncHandler { curTx.put(current); } } + + public void doInitialSync(PeerId peer) { + //TODO: + } } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java index 5398302f..783d391a 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoService.java @@ -9,6 +9,7 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer; import com.usatiuk.dhfs.objects.transaction.Transaction; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -76,4 +77,19 @@ public class PeerInfoService { getTree().move(parent, new JKleppmannTreeNodeMetaPeer(newPeerInfo.id()), getTree().getNewNodeId()); }); } + + public void removePeer(PeerId id) { + jObjectTxManager.run(() -> { + var gotKey = getTree().traverse(List.of(id.toString())); + if (gotKey == null) { + return; + } + var meta = curTx.get(JKleppmannTreeNode.class, gotKey).map(node -> (JKleppmannTreeNodeMetaPeer) node.meta()).orElse(null); + if (meta == null) { + Log.warn("Peer " + id + " not found in the tree"); + return; + } + getTree().trash(meta, id.toJObjectKey()); + }); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java index 344ef33f..43c1aa88 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java @@ -34,7 +34,7 @@ public class ManagementApi { @Path("known-peers") @DELETE public void deletePeer(KnownPeerDelete knownPeerDelete) { -// peerManager.removeRemoteHost(PeerId.of(knownPeerPut.uuid())); + peerManager.removeRemoteHost(PeerId.of(knownPeerDelete.uuid())); } @Path("available-peers") From 73f5b9cdd964f596bfe13fcd670844b917952083 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 16 Feb 2025 21:45:51 +0100 Subject: [PATCH 066/105] slight cleanup --- dhfs-parent/kleppmanntree/pom.xml | 4 ++++ .../usatiuk/kleppmanntree/KleppmannTree.java | 20 +++++++------------ .../com/usatiuk/kleppmanntree/TreeNode.java | 6 ++++-- .../kleppmanntree/KleppmanTreeSimpleTest.java | 2 +- .../usatiuk/kleppmanntree/TestTreeNode.java | 10 +++++----- .../java/com/usatiuk/dhfs/objects/PeerId.java | 9 ++------- .../structs/JKleppmannTreeNode.java | 8 +++++--- .../repository/PersistentPeerDataService.java | 2 +- .../local/LocalPeerDiscoveryClient.java | 2 +- 9 files changed, 30 insertions(+), 33 deletions(-) diff --git a/dhfs-parent/kleppmanntree/pom.xml b/dhfs-parent/kleppmanntree/pom.xml index c6b78ae1..077abfd1 100644 --- a/dhfs-parent/kleppmanntree/pom.xml +++ b/dhfs-parent/kleppmanntree/pom.xml @@ -26,5 +26,9 @@ org.apache.commons commons-lang3 + + org.pcollections + pcollections + \ No newline 
at end of file diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index e92a1fa2..10fd6004 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -53,8 +53,7 @@ public class KleppmannTree, PeerIdT ex var node = _storage.getById(effect.childId()); var curParent = _storage.getById(effect.newParentId()); { - var newCurParentChildren = new HashMap<>(curParent.children()); - newCurParentChildren.remove(node.meta().getName()); + var newCurParentChildren = curParent.children().minus(node.meta().getName()); curParent = curParent.withChildren(newCurParentChildren); _storage.putNode(curParent); } @@ -65,8 +64,7 @@ public class KleppmannTree, PeerIdT ex // Needs to be read after changing curParent, as it might be the same node var oldParent = _storage.getById(effect.oldInfo().oldParent()); { - var newOldParentChildren = new HashMap<>(oldParent.children()); - newOldParentChildren.put(node.meta().getName(), node.key()); + var newOldParentChildren = oldParent.children().plus(node.meta().getName(), node.key()); oldParent = oldParent.withChildren(newOldParentChildren); _storage.putNode(oldParent); } @@ -79,8 +77,7 @@ public class KleppmannTree, PeerIdT ex var node = _storage.getById(effect.childId()); var curParent = _storage.getById(effect.newParentId()); { - var newCurParentChildren = new HashMap<>(curParent.children()); - newCurParentChildren.remove(node.meta().getName()); + var newCurParentChildren = curParent.children().minus(node.meta().getName()); curParent = curParent.withChildren(newCurParentChildren); _storage.putNode(curParent); } @@ -149,10 +146,9 @@ public class KleppmannTree, PeerIdT ex for (var n : inTrash) { var node = _storage.getById(n); { - var newTrashChildren = new HashMap<>(trash.children()); - if (newTrashChildren.remove(n.toString()) == null) + if (!trash.children().containsKey(n.toString())) LOGGER.severe("Node " + node.key() + " not found in trash but should be there"); - trash = trash.withChildren(newTrashChildren); + trash = trash.withChildren(trash.children().minus(n.toString())); _storage.putNode(trash); } _storage.removeNode(n); @@ -307,8 +303,7 @@ public class KleppmannTree, PeerIdT ex node = _storage.getById(effect.childId()); } if (oldParentNode != null) { - var newOldParentChildren = new HashMap<>(oldParentNode.children()); - newOldParentChildren.remove(effect.oldInfo().oldMeta().getName()); + var newOldParentChildren = oldParentNode.children().minus(effect.oldInfo().oldMeta().getName()); oldParentNode = oldParentNode.withChildren(newOldParentChildren); _storage.putNode(oldParentNode); } @@ -317,8 +312,7 @@ public class KleppmannTree, PeerIdT ex newParentNode = _storage.getById(effect.newParentId()); { - var newNewParentChildren = new HashMap<>(newParentNode.children()); - newNewParentChildren.put(effect.newMeta().getName(), effect.childId()); + var newNewParentChildren = newParentNode.children().plus(effect.newMeta().getName(), effect.childId()); newParentNode = newParentNode.withChildren(newNewParentChildren); _storage.putNode(newParentNode); } diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java index 852c5870..f490bb9e 100644 --- 
a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/TreeNode.java @@ -1,5 +1,7 @@ package com.usatiuk.kleppmanntree; +import org.pcollections.PMap; + import java.io.Serializable; import java.util.Map; @@ -12,7 +14,7 @@ public interface TreeNode, PeerIdT ext MetaT meta(); - Map children(); + PMap children(); TreeNode withParent(NodeIdT parent); @@ -20,5 +22,5 @@ public interface TreeNode, PeerIdT ext TreeNode withMeta(MetaT meta); - TreeNode withChildren(Map children); + TreeNode withChildren(PMap children); } diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java index a52ce207..dfe99ebd 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/KleppmanTreeSimpleTest.java @@ -73,7 +73,7 @@ public class KleppmanTreeSimpleTest { // Second node wins as it has smaller timestamp Assertions.assertIterableEquals(List.of("Test2"), testNode1._storageInterface.getById(testNode2._storageInterface.getRootId()).children().keySet()); - Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).children().keySet()); + Assertions.assertIterableEquals(List.of("Test1", "TestFile"), testNode1._storageInterface.getById(d2id).children().keySet().stream().sorted().toList()); Assertions.assertEquals(d2id, testNode1._tree.traverse(List.of("Test2"))); Assertions.assertEquals(d1id, testNode1._tree.traverse(List.of("Test2", "Test1"))); Assertions.assertEquals(f1id, testNode1._tree.traverse(List.of("Test2", "TestFile"))); diff --git a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java index a8da2c14..373eb580 100644 --- a/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java +++ b/dhfs-parent/kleppmanntree/src/test/java/com/usatiuk/kleppmanntree/TestTreeNode.java @@ -1,14 +1,14 @@ package com.usatiuk.kleppmanntree; -import java.util.Collections; -import java.util.Map; +import org.pcollections.HashTreePMap; +import org.pcollections.PMap; public record TestTreeNode(Long key, Long parent, OpMove lastEffectiveOp, TestNodeMeta meta, - Map children) implements TreeNode { + PMap children) implements TreeNode { public TestTreeNode(Long id, Long parent, TestNodeMeta meta) { - this(id, parent, null, meta, Collections.emptyMap()); + this(id, parent, null, meta, HashTreePMap.empty()); } @Override @@ -27,7 +27,7 @@ public record TestTreeNode(Long key, Long parent, OpMove withChildren(Map children) { + public TreeNode withChildren(PMap children) { return new TestTreeNode(key, parent, lastEffectiveOp, meta, children); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java index a85ae068..339f2a53 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/PeerId.java @@ -1,15 +1,10 @@ package com.usatiuk.dhfs.objects; import java.io.Serializable; -import java.util.UUID; - -public record PeerId(UUID id) implements Serializable, Comparable { - public static PeerId of(UUID id) { - return new 
PeerId(id); - } +public record PeerId(JObjectKey id) implements Serializable, Comparable { public static PeerId of(String id) { - return new PeerId(UUID.fromString(id)); + return new PeerId(JObjectKey.of(id)); } @Override diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java index 1d1a4839..8b2afa28 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java @@ -6,7 +6,9 @@ import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.repository.peersync.structs.JKleppmannTreeNodeMetaPeer; import com.usatiuk.kleppmanntree.OpMove; import com.usatiuk.kleppmanntree.TreeNode; +import org.pcollections.HashTreePMap; import org.pcollections.PCollection; +import org.pcollections.PMap; import org.pcollections.TreePSet; import java.io.Serializable; @@ -20,10 +22,10 @@ import java.util.stream.Stream; public record JKleppmannTreeNode(JObjectKey key, PCollection refsFrom, boolean frozen, JObjectKey parent, OpMove lastEffectiveOp, JKleppmannTreeNodeMeta meta, - Map children) implements TreeNode, JDataRefcounted, Serializable { + PMap children) implements TreeNode, JDataRefcounted, Serializable { public JKleppmannTreeNode(JObjectKey id, JObjectKey parent, JKleppmannTreeNodeMeta meta) { - this(id, TreePSet.empty(), false, parent, null, meta, Collections.emptyMap()); + this(id, TreePSet.empty(), false, parent, null, meta, HashTreePMap.empty()); } @Override @@ -42,7 +44,7 @@ public record JKleppmannTreeNode(JObjectKey key, PCollection refsFro } @Override - public JKleppmannTreeNode withChildren(Map children) { + public JKleppmannTreeNode withChildren(PMap children) { return new JKleppmannTreeNode(key, refsFrom, frozen, parent, lastEffectiveOp, meta, children); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 8d21c6ef..562b7809 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -59,7 +59,7 @@ public class PersistentPeerDataService { return; } else { try { - _selfUuid = presetUuid.map(s -> PeerId.of(UUID.fromString(s))).orElseGet(() -> PeerId.of(UUID.randomUUID())); + _selfUuid = presetUuid.map(PeerId::of).orElseGet(() -> PeerId.of(UUID.randomUUID().toString())); Log.info("Generating a key pair, please wait"); _selfKeyPair = CertificateTools.generateKeyPair(); _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java index f5ce9d6b..9b505307 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java @@ -72,7 +72,7 @@ public class 
LocalPeerDiscoveryClient { var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength())); peerDiscoveryDirectory.notifyAddr( new IpPeerAddress( - PeerId.of(UUID.fromString(got.getUuid())), + PeerId.of(got.getUuid()), PeerAddressType.LAN, packet.getAddress(), got.getPort(), From 4f7da67ba5659c341faed9add3e5182ea8430335 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 09:55:35 +0100 Subject: [PATCH 067/105] simplify serialization, for now --- .../usatiuk/dhfs/files/objects/ChunkData.java | 2 - .../files/objects/ChunkDataSerializer.java | 22 --- .../com/usatiuk/dhfs/files/objects/File.java | 1 - .../dhfs/files/objects/FileSerializer.java | 44 ----- .../com/usatiuk/dhfs/objects/JDataRemote.java | 4 - .../usatiuk/dhfs/objects/ReceivedObject.java | 2 +- .../TemporaryJDataRemoteSerializer.java | 21 +++ .../dhfs/objects/TemporaryOpSerializer.java | 22 +++ .../structs/JKleppmannTreeNodeMeta.java | 2 - .../JKleppmannTreeNodeMetaDirectory.java | 4 - .../structs/JKleppmannTreeNodeMetaFile.java | 2 - .../dhfs/objects/repository/PeerManager.java | 5 +- .../repository/ReceivedObjectSerializer.java | 46 ----- .../repository/RemoteObjectServiceClient.java | 16 +- .../repository/RemoteObjectServiceServer.java | 21 +-- .../TemporaryReceivedObjectSerializer.java | 43 +++++ .../invalidation/IndexUpdateOp.java | 4 - .../invalidation/IndexUpdateOpSerializer.java | 36 ---- .../JKleppmannTreeOpPTempSerializer.java | 22 --- .../objects/repository/invalidation/Op.java | 5 +- .../peersync/PeerInfoSerializer.java | 24 --- .../src/main/proto/dhfs_objects_serial.proto | 168 ++---------------- .../src/main/proto/dhfs_objects_sync.proto | 70 ++------ 23 files changed, 137 insertions(+), 449 deletions(-) delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java index 0f1033c7..2f41e743 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java @@ -4,9 +4,7 @@ import com.google.protobuf.ByteString; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -//@ProtoMirror(ChunkDataP.class) public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote { 
@Override public int estimateSize() { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java deleted file mode 100644 index f23a8da0..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import jakarta.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class ChunkDataSerializer implements ProtoSerializer { - @Override - public ChunkData deserialize(ChunkDataP message) { - return new ChunkData(JObjectKey.of(message.getName()), message.getData()); - } - - @Override - public ChunkDataP serialize(ChunkData object) { - return ChunkDataP.newBuilder() - .setName(object.key().toString()) - .setData(object.data()) - .build(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index a1878128..37aaf5a5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -2,7 +2,6 @@ package com.usatiuk.dhfs.files.objects; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; import org.pcollections.TreePMap; import java.util.Collection; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java deleted file mode 100644 index d5550a7c..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.persistence.FileP; -import jakarta.enterprise.context.ApplicationScoped; -import org.pcollections.TreePMap; - -@ApplicationScoped -public class FileSerializer implements ProtoSerializer { - @Override - public File deserialize(FileP message) { - TreePMap chunks = TreePMap.empty(); - for (var chunk : message.getChunksList()) { - chunks = chunks.plus(chunk.getStart(), JObjectKey.of(chunk.getId())); - } - var ret = new File(JObjectKey.of(message.getUuid()), - message.getMode(), - message.getCtime(), - message.getMtime(), - chunks, - message.getSymlink(), - message.getSize() - ); - return ret; - } - - @Override - public FileP serialize(File object) { - var builder = FileP.newBuilder() - .setUuid(object.key().toString()) - .setMode(object.mode()) - .setCtime(object.cTime()) - .setMtime(object.mTime()) - .setSymlink(object.symlink()) - .setSize(object.size()); - object.chunks().forEach((s, i) -> { - builder.addChunksBuilder() - .setStart(s) - .setId(i.toString()); - }); - return builder.build(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java index 531fe8ad..2a39f186 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/JDataRemote.java @@ -1,13 +1,9 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.autoprotomap.runtime.ProtoMirror; -import com.usatiuk.dhfs.objects.persistence.RemoteObjectP; - import java.io.Serializable; import java.util.Collection; import java.util.List; -@ProtoMirror(RemoteObjectP.class) public interface JDataRemote extends Serializable { JObjectKey key(); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java index 30e92654..6826b09f 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/ReceivedObject.java @@ -2,5 +2,5 @@ package com.usatiuk.dhfs.objects; import org.pcollections.PMap; -public record ReceivedObject(JObjectKey key, PMap<PeerId, Long> changelog, JDataRemote data) { +public record ReceivedObject(PMap<PeerId, Long> changelog, JDataRemote data) { } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java new file mode 100644 index 00000000..8131c103 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryJDataRemoteSerializer.java @@ -0,0 +1,21 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.persistence.JDataRemoteP; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.inject.Singleton; + +@Singleton +public class TemporaryJDataRemoteSerializer implements ProtoSerializer<JDataRemoteP, JDataRemote> { + @Override + public JDataRemote deserialize(JDataRemoteP message) { + return SerializationHelper.deserialize(message.getSerializedData().toByteArray()); + } + + @Override + public JDataRemoteP serialize(JDataRemote object) { + return JDataRemoteP.newBuilder() + .setSerializedData(SerializationHelper.serialize(object)) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java new file mode 100644 index 00000000..ab9682db --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/TemporaryOpSerializer.java @@ -0,0 +1,22 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.repository.OpP; +import com.usatiuk.dhfs.objects.repository.invalidation.Op; +import com.usatiuk.dhfs.utils.SerializationHelper; +import jakarta.inject.Singleton; + +@Singleton +public class TemporaryOpSerializer implements ProtoSerializer<OpP, Op> { + @Override + public Op deserialize(OpP message) { + return SerializationHelper.deserialize(message.getSerializedData().toByteArray()); + } + + @Override + public OpP serialize(Op object) { + return OpP.newBuilder() + .setSerializedData(SerializationHelper.serialize(object)) + .build(); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java index fb4e5311..d2c1f988 100644 ---
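
The two Temporary*Serializer classes above deliberately punt on a per-type protobuf mapping: the whole object graph goes through standard Java serialization and lands in a single bytes field. Assuming SerializationHelper is a thin wrapper over ObjectOutputStream/ObjectInputStream (its source is not shown in this excerpt), the round trip reduces to roughly this sketch:

    import java.io.*;

    final class JavaSerdeSketch {
        static byte[] serialize(Serializable obj) throws IOException {
            var bytes = new ByteArrayOutputStream();
            try (var out = new ObjectOutputStream(bytes)) {
                out.writeObject(obj); // the object graph must be Serializable all the way down
            }
            return bytes.toByteArray();
        }

        @SuppressWarnings("unchecked")
        static <T> T deserialize(byte[] data) throws IOException, ClassNotFoundException {
            try (var in = new ObjectInputStream(new ByteArrayInputStream(data))) {
                return (T) in.readObject(); // caller asserts the expected type
            }
        }
    }

This is also why the Op interface later in this patch gains extends Serializable: anything passing through these serializers has to be Java-serializable.
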
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java @@ -1,7 +1,5 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; -import com.usatiuk.autoprotomap.runtime.ProtoMirror; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP; import com.usatiuk.kleppmanntree.NodeMeta; import java.util.Objects; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java index 39ebd488..4cf3514d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java @@ -1,9 +1,5 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; -import com.usatiuk.autoprotomap.runtime.ProtoMirror; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP; - -//@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class) public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta { public JKleppmannTreeNodeMetaDirectory(String name) { super(name); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java index b51af59a..a7e9a3a5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java @@ -2,11 +2,9 @@ package com.usatiuk.dhfs.objects.jkleppmanntree.structs; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP; import java.util.Objects; -//@ProtoMirror(JKleppmannTreeNodeMetaFileP.class) public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta { private final JObjectKey _fileIno; diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java index 52741a53..3a8eb774 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -136,10 +136,7 @@ public class PeerManager { private boolean pingCheck(PeerInfo host, PeerAddress address) { try { return rpcClientFactory.withObjSyncClient(host.id(), address, pingTimeout, (peer, c) -> { - var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); - if (!UUID.fromString(ret.getSelfUuid()).equals(host.id().id())) { - throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host.id()); - } + c.ping(PingRequest.getDefaultInstance()); return true; }); } catch (Exception ignored) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java deleted file mode 100644 index 73ab19cd..00000000 --- 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/ReceivedObjectSerializer.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.JDataRemote; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.ReceivedObject; -import com.usatiuk.dhfs.objects.persistence.RemoteObjectP; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.pcollections.HashTreePMap; -import org.pcollections.PMap; - -@ApplicationScoped -public class ReceivedObjectSerializer implements ProtoSerializer { - @Inject - ProtoSerializer remoteObjectSerializer; - - @Override - public ReceivedObject deserialize(GetObjectReply message) { - PMap changelog = HashTreePMap.empty(); - for (var entry : message.getHeader().getChangelog().getEntriesList()) { - changelog = changelog.plus(PeerId.of(entry.getHost()), entry.getVersion()); - } - return new ReceivedObject( - JObjectKey.of(message.getHeader().getName()), - changelog, - remoteObjectSerializer.deserialize(message.getContent()) - ); - } - - @Override - public GetObjectReply serialize(ReceivedObject object) { - var builder = GetObjectReply.newBuilder(); - var headerBuilder = builder.getHeaderBuilder(); - headerBuilder.setName(object.key().toString()); - var changelogBuilder = headerBuilder.getChangelogBuilder(); - object.changelog().forEach((peer, version) -> { - changelogBuilder.addEntriesBuilder() - .setHost(peer.toString()) - .setVersion(version); - }); - builder.setContent(remoteObjectSerializer.serialize(object.data())); - return builder.build(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java index b9bdc292..56135ef5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.autoprotomap.runtime.ProtoSerializer; import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.JObjectKeyP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.invalidation.Op; import com.usatiuk.dhfs.objects.transaction.Transaction; @@ -41,11 +42,8 @@ public class RemoteObjectServiceClient { SyncHandler syncHandler; @Inject InvalidationQueueService invalidationQueueService; - // @Inject -// ProtoSerializer dataProtoSerializer; @Inject - ProtoSerializer opProtoSerializer; - + ProtoSerializer opProtoSerializer; @Inject ProtoSerializer receivedObjectProtoSerializer; @@ -76,7 +74,7 @@ public class RemoteObjectServiceClient { Log.info("Downloading object " + key + " from " + targets); rpcClientFactory.withObjSyncClient(targets, (peer, client) -> { - var reply = client.getObject(GetObjectRequest.newBuilder().setName(key.toString()).build()); + var reply = client.getObject(GetObjectRequest.newBuilder().setName(JObjectKeyP.newBuilder().setName(key.toString()).build()).build()); var deserialized = receivedObjectProtoSerializer.deserialize(reply); @@ -152,13 +150,13 @@ public class RemoteObjectServiceClient { return OpPushReply.getDefaultInstance(); } - public Collection> 
canDelete(Collection targets, JObjectKey object, Collection ourReferrers) { - Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(PeerId::toString).collect(Collectors.joining(", "))); + public Collection> canDelete(Collection targets, JObjectKey objKey, Collection ourReferrers) { + Log.trace("Asking canDelete for " + objKey + " from " + targets.stream().map(PeerId::toString).collect(Collectors.joining(", "))); try { return _batchExecutor.invokeAll(targets.stream().>>map(h -> () -> { - var req = CanDeleteRequest.newBuilder().setName(object.toString()); + var req = CanDeleteRequest.newBuilder().setName(JObjectKeyP.newBuilder().setName(objKey.toString()).build()); for (var ref : ourReferrers) { - req.addOurReferrers(ref.toString()); + req.addOurReferrers(JObjectKeyP.newBuilder().setName(ref.toString()).build()); } return Pair.of(h, rpcClientFactory.withObjSyncClient(h, (p, client) -> client.canDelete(req.build()))); }).toList()).stream().map(f -> { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java index ace16a81..466df67d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.autoprotomap.runtime.ProtoSerializer; import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.JObjectKeyP; import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.repository.invalidation.Op; import com.usatiuk.dhfs.objects.repository.invalidation.OpHandler; @@ -37,10 +38,8 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { InvalidationQueueService invalidationQueueService; @Inject SecurityIdentity identity; - // @Inject -// ProtoSerializer dataProtoSerializer; @Inject - ProtoSerializer opProtoSerializer; + ProtoSerializer opProtoSerializer; @Inject ProtoSerializer receivedObjectProtoSerializer; @Inject @@ -53,10 +52,9 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { public Uni getObject(GetObjectRequest request) { Log.info("<-- getObject: " + request.getName() + " from " + identity.getPrincipal().getName().substring(3)); - Pair got = txm.run(() -> { - var meta = remoteTx.getMeta(JObjectKey.of(request.getName())).orElse(null); - var obj = remoteTx.getDataLocal(JDataRemote.class, JObjectKey.of(request.getName())).orElse(null); + var meta = remoteTx.getMeta(JObjectKey.of(request.getName().getName())).orElse(null); + var obj = remoteTx.getDataLocal(JDataRemote.class, JObjectKey.of(request.getName().getName())).orElse(null); if (meta != null && !meta.seen()) curTx.put(meta.withSeen(true)); if (obj != null) @@ -78,7 +76,7 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { throw new StatusRuntimeException(Status.NOT_FOUND); } - var serialized = receivedObjectProtoSerializer.serialize(new ReceivedObject(got.getKey().key(), got.getKey().changelog(), got.getValue())); + var serialized = receivedObjectProtoSerializer.serialize(new ReceivedObject(got.getKey().changelog(), got.getValue())); return Uni.createFrom().item(serialized); // // Does @Blocking break this? 
// return Uni.createFrom().emitter(emitter -> { @@ -124,10 +122,9 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { Log.info("<-- canDelete: " + request.getName() + " from " + peerId); var builder = CanDeleteReply.newBuilder(); - builder.setObjName(request.getName()); txm.run(() -> { - var obj = curTx.get(RemoteObjectMeta.class, JObjectKey.of(request.getName())).orElse(null); + var obj = curTx.get(RemoteObjectMeta.class, JObjectKey.of(request.getName().getName())).orElse(null); if (obj == null) { builder.setDeletionCandidate(true); @@ -138,7 +135,7 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { if (!builder.getDeletionCandidate()) for (var r : obj.refsFrom()) - builder.addReferrers(r.toString()); + builder.addReferrers(JObjectKeyP.newBuilder().setName(r.toString()).build()); // if (!ret.getDeletionCandidate()) // for (var rr : request.getOurReferrersList()) @@ -181,8 +178,6 @@ public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { @Override @Blocking public Uni ping(PingRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - return Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); + return Uni.createFrom().item(PingReply.getDefaultInstance()); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java new file mode 100644 index 00000000..741026f9 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/TemporaryReceivedObjectSerializer.java @@ -0,0 +1,43 @@ +package com.usatiuk.dhfs.objects.repository; + +import com.usatiuk.autoprotomap.runtime.ProtoSerializer; +import com.usatiuk.dhfs.objects.JDataRemote; +import com.usatiuk.dhfs.objects.PeerId; +import com.usatiuk.dhfs.objects.ReceivedObject; +import com.usatiuk.dhfs.objects.persistence.JDataRemoteP; +import com.usatiuk.dhfs.objects.persistence.JObjectKeyP; +import com.usatiuk.dhfs.objects.persistence.PeerIdP; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import org.pcollections.HashTreePMap; +import org.pcollections.PMap; + +@Singleton +public class TemporaryReceivedObjectSerializer implements ProtoSerializer { + @Inject + ProtoSerializer remoteObjectSerializer; + + @Override + public ReceivedObject deserialize(GetObjectReply message) { + PMap changelog = HashTreePMap.empty(); + for (var entry : message.getChangelog().getEntriesList()) { + changelog = changelog.plus(PeerId.of(entry.getKey().getId().getName()), entry.getValue()); + } + var data = remoteObjectSerializer.deserialize(message.getPushedData()); + return new ReceivedObject(changelog, data); + } + + @Override + public GetObjectReply serialize(ReceivedObject object) { + var builder = GetObjectReply.newBuilder(); + var changelogBuilder = builder.getChangelogBuilder(); + object.changelog().forEach((peer, version) -> { + changelogBuilder.addEntriesBuilder() + .setKey(PeerIdP.newBuilder().setId(JObjectKeyP.newBuilder().setName(peer.id().toString()).build()).build()) + .setValue(version); + }); + builder.setPushedData(remoteObjectSerializer.serialize(object.data())); + return builder.build(); + } +} diff --git 
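
The changelog that TemporaryReceivedObjectSerializer above packs into GetObjectReply is a per-peer version vector (peer id mapped to an int64 version). This excerpt does not show how DHFS compares changelogs during sync, but the standard dominance check over such vectors, shown here for intuition with plain String keys as a stand-in for PeerId, looks like this:

    import java.util.Map;

    final class VersionVectorSketch {
        // true when 'a' has seen at least everything 'b' has, component-wise
        static boolean dominates(Map<String, Long> a, Map<String, Long> b) {
            for (var e : b.entrySet()) {
                if (a.getOrDefault(e.getKey(), 0L) < e.getValue()) return false;
            }
            return true;
        }
        // if neither vector dominates the other, the updates were concurrent,
        // which is the case a sync handler must treat as a conflict
    }
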
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java index 8e9a6d4c..38e1aef3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOp.java @@ -9,10 +9,6 @@ import java.util.Collection; import java.util.List; public record IndexUpdateOp(JObjectKey key, PMap changelog) implements Op { - public IndexUpdateOp(RemoteObjectMeta object) { - this(object.key(), object.changelog()); - } - @Override public Collection getEscapedRefs() { return List.of(key); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java deleted file mode 100644 index bb614c1a..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/IndexUpdateOpSerializer.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.invalidation; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.repository.IndexUpdateOpP; -import jakarta.enterprise.context.ApplicationScoped; -import org.pcollections.HashTreePMap; -import org.pcollections.PMap; - -@ApplicationScoped -public class IndexUpdateOpSerializer implements ProtoSerializer { - - @Override - public IndexUpdateOp deserialize(IndexUpdateOpP message) { - PMap map = HashTreePMap.empty(); - for (var entry : message.getHeader().getChangelog().getEntriesList()) { - map = map.plus(PeerId.of(entry.getHost()), entry.getVersion()); - } - return new IndexUpdateOp(JObjectKey.of(message.getHeader().getName()), map); - } - - @Override - public IndexUpdateOpP serialize(IndexUpdateOp object) { - var builder = IndexUpdateOpP.newBuilder(); - var headerBuilder = builder.getHeaderBuilder(); - headerBuilder.setName(object.key().name()); - var changelogBuilder = headerBuilder.getChangelogBuilder(); - for (var entry : object.changelog().entrySet()) { - var entryBuilder = changelogBuilder.addEntriesBuilder(); - entryBuilder.setHost(entry.getKey().id().toString()); - entryBuilder.setVersion(entry.getValue()); - } - return builder.build(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java deleted file mode 100644 index 13399feb..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/JKleppmannTreeOpPTempSerializer.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.invalidation; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; -import com.usatiuk.dhfs.objects.repository.JKleppmannTreeOpPTemp; -import com.usatiuk.dhfs.utils.SerializationHelper; -import jakarta.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class JKleppmannTreeOpPTempSerializer implements ProtoSerializer { - @Override - public JKleppmannTreeOpWrapper deserialize(JKleppmannTreeOpPTemp message) { - 
return SerializationHelper.deserialize(message.getSerialized().toByteArray()); - } - - @Override - public JKleppmannTreeOpPTemp serialize(JKleppmannTreeOpWrapper object) { - return JKleppmannTreeOpPTemp.newBuilder() - .setSerialized(SerializationHelper.serialize(object)) - .build(); - } -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java index c73b406c..143bfcf3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/Op.java @@ -2,11 +2,10 @@ package com.usatiuk.dhfs.objects.repository.invalidation; import com.usatiuk.autoprotomap.runtime.ProtoMirror; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.repository.OpPushPayload; +import java.io.Serializable; import java.util.Collection; -@ProtoMirror(OpPushPayload.class) -public interface Op { +public interface Op extends Serializable { Collection getEscapedRefs(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java deleted file mode 100644 index 5f00c155..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfoSerializer.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.google.protobuf.ByteString; -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.PeerId; -import com.usatiuk.dhfs.objects.persistence.PeerInfoP; -import jakarta.enterprise.context.ApplicationScoped; - -@ApplicationScoped -public class PeerInfoSerializer implements ProtoSerializer { - - @Override - public PeerInfo deserialize(PeerInfoP message) { - return new PeerInfo(PeerId.of(message.getUuid()), message.getCert().toByteArray()); - } - - @Override - public PeerInfoP serialize(PeerInfo object) { - return PeerInfoP.newBuilder() - .setUuid(object.key().toString()) - .setCert(ByteString.copyFrom(object.cert())) - .build(); - } -} diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto index 3acb21a3..4681a0be 100644 --- a/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto +++ b/dhfs-parent/server/src/main/proto/dhfs_objects_serial.proto @@ -6,164 +6,28 @@ option java_outer_classname = "DhfsObjectPersistence"; package dhfs.objects.persistence; -message ObjectMetadataP { - string name = 1; - map remoteCopies = 2; - string knownClass = 3; - bool seen = 4; - bool deleted = 5; - repeated string confirmedDeletes = 6; - repeated string referrers = 7; - map changelog = 8; - repeated string savedRefs = 9; - bool frozen = 10; - bool haveLocalCopy = 11; -} - -message FsNodeP { - string uuid = 1; - int64 mode = 2; - int64 ctime = 3; - int64 mtime = 4; -} - -message FilePChunksEntry { - int64 start = 1; - string id = 2; -} - -message FileP { - string uuid = 1; - int64 mode = 2; - int64 ctime = 3; - int64 mtime = 4; - repeated FilePChunksEntry chunks = 5; - bool symlink = 6; - int64 size = 7; -} - -message DirectoryP { - FsNodeP fsNode = 1; - map children = 2; -} - -message ChunkDataP { - string name = 1; - bytes data = 2; -} - -message PeerDirectoryP { - repeated string peers = 1; -} - -message 
PeerInfoP { - string uuid = 1; - bytes cert = 2; -} - -message JKleppmannTreeNodeMetaFileP { - string name = 1; - string fileIno = 2; -} - -message JKleppmannTreeNodeMetaDirectoryP { +message JObjectKeyP { string name = 1; } -message JKleppmannTreeNodeMetaP { - oneof meta { - JKleppmannTreeNodeMetaFileP jKleppmannTreeNodeMetaFile = 1; - JKleppmannTreeNodeMetaDirectoryP jKleppmannTreeNodeMetaDirectory = 2; +message PeerIdP { + JObjectKeyP id = 1; +} + +message ObjectChangelog { + message entries_Entry { + PeerIdP key = 1; + int64 value = 2; } + repeated entries_Entry entries = 1; } -message JKleppmannTreeOpP { - int64 timestamp = 1; - string peer = 2; - string newParentId = 3; - JKleppmannTreeNodeMetaP meta = 4; - string child = 5; +// TODO: Serialization + +message JDataRemoteP { + bytes serializedData = 1; } -message JKleppmannTreeNodePChildrenEntry { - string key = 1; - string value = 2; -} - -message JKleppmannTreeNodeP { - optional string parent = 1; - string id = 2; - repeated JKleppmannTreeNodePChildrenEntry children = 3; - optional JKleppmannTreeNodeMetaP meta = 4; - optional JKleppmannTreeOpP lastEffectiveOp = 5; -} - -message JKleppmannTreePersistentDataPQueueEntry { - int64 clock = 1; - string uuid = 2; - JKleppmannTreeOpP op = 3; -} - -message JKleppmannTreePersistentDataPQueue { - string node = 1; - repeated JKleppmannTreePersistentDataPQueueEntry entries = 2; -} - -message JKleppmannTreePersistentDataPTimestampEntry { - string host = 1; - int64 timestamp = 2; -} - -message JKleppmannTreeOpLogEffectP { - optional JKleppmannTreeOpP oldEffectiveMove = 1; - optional string oldParent = 2; - optional JKleppmannTreeNodeMetaP oldMeta = 3; - JKleppmannTreeOpP effectiveOp = 4; - string newParentId = 5; - JKleppmannTreeNodeMetaP newMeta = 6; - string selfId = 7; -} - -message JKleppmannTreeOpLogPEntry { - int64 clock = 1; - string uuid = 2; - JKleppmannTreeOpP op = 3; - repeated JKleppmannTreeOpLogEffectP effects = 4; -} - -message JKleppmannTreePersistentDataP { - string treeName = 1; - int64 clock = 2; - repeated JKleppmannTreePersistentDataPQueue queues = 3; - repeated JKleppmannTreePersistentDataPTimestampEntry peerLog = 4; - repeated JKleppmannTreeOpLogPEntry opLog = 5; -} - -message PeerDirectoryLocalP { - repeated string initialOpSyncDonePeers = 1; - repeated string initialObjSyncDonePeers = 2; -} - -message RemoteObjectP { - oneof obj { - FileP file = 2; - ChunkDataP chunkData = 5; - // PeerDirectoryP peerDirectory = 6; - PeerInfoP peerInfo = 7; - // JKleppmannTreeNodeP jKleppmannTreeNode = 8; - // JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; - // PeerDirectoryLocalP peerDirectoryLocal = 10; - } -} - -message JObjectDataP { - oneof obj { - FileP file = 2; - ChunkDataP chunkData = 5; -// PeerDirectoryP peerDirectory = 6; - PeerInfoP peerInfo = 7; -// JKleppmannTreeNodeP jKleppmannTreeNode = 8; -// JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; -// PeerDirectoryLocalP peerDirectoryLocal = 10; - } +message JDataP { + bytes serializedData = 1; } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto index 5820aefb..2baccab9 100644 --- a/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto +++ b/dhfs-parent/server/src/main/proto/dhfs_objects_sync.proto @@ -9,83 +9,45 @@ option java_outer_classname = "DhfsObjectSyncApi"; package dhfs.objects.sync; service DhfsObjectSyncGrpc { + rpc OpPush (OpPushRequest) returns (OpPushReply) {} + rpc 
GetObject (GetObjectRequest) returns (GetObjectReply) {} rpc CanDelete (CanDeleteRequest) returns (CanDeleteReply) {} - rpc OpPush (OpPushRequest) returns (OpPushReply) {} rpc Ping (PingRequest) returns (PingReply) {} } -message PingRequest { - string selfUuid = 1; -} +message PingRequest {} -message PingReply { - string selfUuid = 1; -} - -message ObjectChangelogEntry { - string host = 1; - uint64 version = 2; -} - -message ObjectChangelog { - repeated ObjectChangelogEntry entries = 1; -} - -message ObjectHeader { - string name = 2; - ObjectChangelog changelog = 5; - optional dhfs.objects.persistence.RemoteObjectP pushedData = 6; -} +message PingReply {} message GetObjectRequest { - string name = 2; + dhfs.objects.persistence.JObjectKeyP name = 2; } message GetObjectReply { - ObjectHeader header = 1; - dhfs.objects.persistence.RemoteObjectP content = 2; + dhfs.objects.persistence.ObjectChangelog changelog = 5; + dhfs.objects.persistence.JDataRemoteP pushedData = 6; } message CanDeleteRequest { - string name = 2; - repeated string ourReferrers = 3; + dhfs.objects.persistence.JObjectKeyP name = 2; + repeated dhfs.objects.persistence.JObjectKeyP ourReferrers = 3; } message CanDeleteReply { - string objName = 1; bool deletionCandidate = 2; - repeated string referrers = 3; -} - -message IndexUpdateOpP { - ObjectHeader header = 1; -} - -message IndexUpdateReply {} - -message JKleppmannTreePeriodicPushOpP { - int64 timestamp = 2; -} - -message JKleppmannTreeOpPTemp { - bytes serialized = 2; -} - -message OpPushPayload { - oneof payload { - JKleppmannTreeOpPTemp jKleppmannTreeOpWrapper = 1; - // dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; - // JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; - IndexUpdateOpP indexUpdateOp = 3; - } + repeated dhfs.objects.persistence.JObjectKeyP referrers = 3; } message OpPushRequest { - repeated OpPushPayload msg = 2; + repeated OpP msg = 2; } message OpPushReply { -} \ No newline at end of file +} + +message OpP { + bytes serializedData = 1; +} From 12d7f3a4273fe3ec2920d9c6ab2c107cba42c362 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 16:22:43 +0100 Subject: [PATCH 068/105] small op push improvements, prepare for "forced" push --- .../dhfs/objects/repository/PeerManager.java | 6 --- .../repository/PersistentPeerDataService.java | 34 +-------------- .../repository/PersistentRemoteHostsData.java | 14 +----- .../DeferredInvalidationQueueData.java | 2 +- .../DeferredInvalidationQueueService.java | 12 +++--- .../invalidation/InvalidationQueueEntry.java | 7 +++ .../InvalidationQueueService.java | 43 ++++++++++--------- .../repository/invalidation/OpPusher.java | 25 ++++++----- 8 files changed, 52 insertions(+), 91 deletions(-) create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java index 3a8eb774..c8a213c2 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -103,11 +103,6 @@ public class PeerManager { private void handleConnectionSuccess(PeerInfo host, PeerAddress address) { boolean wasReachable = isReachable(host); - boolean shouldSync = persistentPeerDataService.markInitialSyncDone(host.id()); - - if 
(shouldSync) - syncHandler.doInitialSync(host.id()); - _states.put(host.id(), address); if (wasReachable) return; @@ -178,7 +173,6 @@ public class PeerManager { public void removeRemoteHost(PeerId peerId) { transactionManager.run(() -> { peerInfoService.removePeer(peerId); - persistentPeerDataService.resetInitialSyncDone(peerId); }); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 562b7809..2f359a82 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -13,7 +13,6 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.pcollections.HashTreePSet; import java.io.IOException; import java.security.KeyPair; @@ -64,7 +63,7 @@ public class PersistentPeerDataService { _selfKeyPair = CertificateTools.generateKeyPair(); _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); - curTx.put(new PersistentRemoteHostsData(_selfUuid, 0, _selfCertificate, _selfKeyPair, HashTreePSet.empty())); + curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair)); peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); } catch (CertificateEncodingException e) { throw new RuntimeException(e); @@ -94,14 +93,6 @@ public class PersistentPeerDataService { return _selfUuid; } - public long getUniqueId() { - return jObjectTxManager.run(() -> { - var curData = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); - curTx.put(curData.withSelfCounter(curData.selfCounter() + 1)); - return curData.selfCounter(); - }); - } - // private void updateCerts() { // try { // peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { @@ -125,28 +116,5 @@ public class PersistentPeerDataService { return _selfCertificate; } - // Returns true if host's initial sync wasn't done before, and marks it as done - public boolean markInitialSyncDone(PeerId peerId) { - return txm.run(() -> { - var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); - if (data == null) throw new IllegalStateException("Self data not found"); - boolean exists = data.initialSyncDone().contains(peerId); - if (exists) return false; - curTx.put(data.withInitialSyncDone(data.initialSyncDone().plus(peerId))); - return true; - }); - } - - // Returns true if it was marked as done before, and resets it - public boolean resetInitialSyncDone(PeerId peerId) { - return txm.run(() -> { - var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); - if (data == null) throw new IllegalStateException("Self data not found"); - boolean exists = data.initialSyncDone().contains(peerId); - if (!exists) return false; - curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId))); - return true; - }); - } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java index 56f3411f..c72f5331 100644 --- 
a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -3,30 +3,18 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; -import org.pcollections.HashTreePSet; -import org.pcollections.PSet; import java.io.Serializable; import java.security.KeyPair; import java.security.cert.X509Certificate; public record PersistentRemoteHostsData(PeerId selfUuid, - long selfCounter, X509Certificate selfCertificate, - KeyPair selfKeyPair, - PSet initialSyncDone) implements JData, Serializable { + KeyPair selfKeyPair) implements JData, Serializable { public static final JObjectKey KEY = JObjectKey.of("self_peer_data"); @Override public JObjectKey key() { return KEY; } - - public PersistentRemoteHostsData withSelfCounter(long selfCounter) { - return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair, HashTreePSet.empty()); - } - - public PersistentRemoteHostsData withInitialSyncDone(PSet initialSyncDone) { - return new PersistentRemoteHostsData(selfUuid, selfCounter, selfCertificate, selfKeyPair, initialSyncDone); - } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java index 1df4136a..83a5144b 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java @@ -13,5 +13,5 @@ public class DeferredInvalidationQueueData implements Serializable { @Serial private static final long serialVersionUID = 1L; - public final MultiValuedMap deferredInvalidations = new HashSetValuedHashMap<>(); + public final MultiValuedMap deferredInvalidations = new HashSetValuedHashMap<>(); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java index 8d2a30b9..d8e68d98 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java @@ -1,6 +1,5 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.repository.PeerManager; import com.usatiuk.dhfs.utils.SerializationHelper; @@ -19,7 +18,6 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; -import java.util.UUID; @ApplicationScoped public class DeferredInvalidationQueueService { @@ -69,17 +67,17 @@ public class DeferredInvalidationQueueService { synchronized (this) { var col = _persistentData.deferredInvalidations.get(host); for (var s : col) { - Log.trace("Un-deferred invalidation to " + host + " of " + s); - invalidationQueueService.pushDeferredInvalidations(host, s); + Log.tracev("Returning deferred 
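
The central change of this patch, visible in the next two files, is making the unit of invalidation work an explicit InvalidationQueueEntry(peer, key, forced) record and gating every enqueue on peer reachability: reachable peers go to the live queue, unreachable ones to the deferred map that is replayed on reconnect. A condensed sketch of that pattern with stand-in types (the real classes use HashSetDelayedBlockingQueue and a MultiValuedMap):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    final class QueueGateSketch {
        record Entry(String peer, String key, boolean forced) {}

        private final Set<Entry> live = ConcurrentHashMap.newKeySet();     // drained by the sender thread
        private final Set<Entry> deferred = ConcurrentHashMap.newKeySet(); // replayed on reconnect

        void push(Entry e, boolean peerReachable) {
            if (peerReachable) live.add(e);
            else deferred.add(e);
        }

        void onPeerReachable(String peer) {
            deferred.removeIf(e -> {
                if (!e.peer().equals(peer)) return false;
                live.add(e); // return the deferred work to the live queue
                return true;
            });
        }
    }
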
invalidation: {0}", s); + invalidationQueueService.pushDeferredInvalidations(s); } col.clear(); } } - void defer(PeerId host, JObjectKey object) { + void defer(InvalidationQueueEntry entry) { synchronized (this) { - Log.trace("Deferred invalidation to " + host + " of " + object); - _persistentData.deferredInvalidations.put(host, object); + Log.tracev("Deferred invalidation: {0}", entry); + _persistentData.deferredInvalidations.put(entry.peer(), entry); } } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java new file mode 100644 index 00000000..deae409b --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects.repository.invalidation; + +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.PeerId; + +public record InvalidationQueueEntry(PeerId peer, JObjectKey key, boolean forced) { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java index 07d7c594..cb31e2b0 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java @@ -1,6 +1,5 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; import com.usatiuk.dhfs.objects.repository.PeerManager; @@ -15,7 +14,6 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import java.util.concurrent.ExecutorService; @@ -25,7 +23,7 @@ import java.util.concurrent.atomic.AtomicReference; @ApplicationScoped public class InvalidationQueueService { - private final HashSetDelayedBlockingQueue> _queue; + private final HashSetDelayedBlockingQueue _queue; private final AtomicReference> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>()); @Inject PeerManager remoteHostManager; @@ -65,7 +63,7 @@ public class InvalidationQueueService { var data = _queue.close(); Log.info("Will defer " + data.size() + " invalidations on shutdown"); for (var e : data) - deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + deferredInvalidationQueueService.defer(e); } private void sender() { @@ -89,9 +87,9 @@ public class InvalidationQueueService { var hostInfo = remoteHostManager.getHostStateSnapshot(); for (var o : toAllQueue) { for (var h : hostInfo.available()) - _queue.add(Pair.of(h, o)); + _queue.add(new InvalidationQueueEntry(h, o, false)); for (var u : hostInfo.unavailable()) - deferredInvalidationQueueService.defer(u, o); + deferredInvalidationQueueService.defer(new InvalidationQueueEntry(u, o, false)); } } } @@ -102,19 +100,19 @@ public class InvalidationQueueService { long success = 0; for (var e : data) { - if (peerInfoService.getPeerInfo(e.getLeft()).isEmpty()) continue; + if 
(peerInfoService.getPeerInfo(e.peer()).isEmpty()) continue; - if (!remoteHostManager.isReachable(e.getLeft())) { - deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); + if (!remoteHostManager.isReachable(e.peer())) { + deferredInvalidationQueueService.defer(e); continue; } try { - opPusher.doPush(e.getLeft(), e.getRight()); + opPusher.doPush(e); success++; } catch (Exception ex) { - Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex); - pushInvalidationToOne(e.getLeft(), e.getRight()); + Log.warnv("Failed to send invalidation to {0}, will retry: {1}", e, ex); + pushInvalidationToOne(e); } if (_shutdown) { Log.info("Invalidation sender exiting"); @@ -150,18 +148,23 @@ public class InvalidationQueueService { } } - public void pushInvalidationToOne(PeerId host, JObjectKey obj) { - if (remoteHostManager.isReachable(host)) - _queue.add(Pair.of(host, obj)); + void pushInvalidationToOne(InvalidationQueueEntry entry) { + if (remoteHostManager.isReachable(entry.peer())) + _queue.add(entry); else - deferredInvalidationQueueService.defer(host, obj); + deferredInvalidationQueueService.defer(entry); } - public void pushInvalidationToOne(PeerId host, JData obj) { - pushInvalidationToOne(host, obj.key()); + public void pushInvalidationToOne(PeerId host, JObjectKey obj, boolean forced) { + var entry = new InvalidationQueueEntry(host, obj, forced); + pushInvalidationToOne(entry); } - protected void pushDeferredInvalidations(PeerId host, JObjectKey name) { - _queue.add(Pair.of(host, name)); + public void pushInvalidationToOne(PeerId host, JObjectKey obj) { + pushInvalidationToOne(host, obj, false); + } + + void pushDeferredInvalidations(InvalidationQueueEntry entry) { + _queue.add(entry); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java index d8d5b336..330b70da 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -1,6 +1,9 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.RemoteObjectMeta; +import com.usatiuk.dhfs.objects.RemoteTransaction; +import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; @@ -23,23 +26,23 @@ public class OpPusher { @Inject InvalidationQueueService invalidationQueueService; - public void doPush(PeerId op, JObjectKey key) { + public void doPush(InvalidationQueueEntry entry) { Op info = txm.run(() -> { - var obj = curTx.get(JData.class, key).orElse(null); + var obj = curTx.get(JData.class, entry.key()).orElse(null); switch (obj) { case RemoteObjectMeta remote -> { - return new IndexUpdateOp(key, remote.changelog()); + return new IndexUpdateOp(entry.key(), remote.changelog()); } case JKleppmannTreePersistentData pd -> { - var maybeQueue = pd.queues().get(op); - if(maybeQueue == null || maybeQueue.isEmpty()) { + var maybeQueue = pd.queues().get(entry.peer()); + if (maybeQueue == null || maybeQueue.isEmpty()) { return null; } - var ret = new JKleppmannTreeOpWrapper(key, 
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java index d8d5b336..330b70da 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -1,6 +1,9 @@ package com.usatiuk.dhfs.objects.repository.invalidation; -import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.RemoteObjectMeta; +import com.usatiuk.dhfs.objects.RemoteTransaction; +import com.usatiuk.dhfs.objects.TransactionManager; import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.transaction.Transaction; @@ -23,23 +26,23 @@ public class OpPusher { @Inject InvalidationQueueService invalidationQueueService; - public void doPush(PeerId op, JObjectKey key) { + public void doPush(InvalidationQueueEntry entry) { Op info = txm.run(() -> { - var obj = curTx.get(JData.class, key).orElse(null); + var obj = curTx.get(JData.class, entry.key()).orElse(null); switch (obj) { case RemoteObjectMeta remote -> { - return new IndexUpdateOp(key, remote.changelog()); + return new IndexUpdateOp(entry.key(), remote.changelog()); } case JKleppmannTreePersistentData pd -> { - var maybeQueue = pd.queues().get(op); - if(maybeQueue == null || maybeQueue.isEmpty()) { + var maybeQueue = pd.queues().get(entry.peer()); + if (maybeQueue == null || maybeQueue.isEmpty()) { return null; } - var ret = new JKleppmannTreeOpWrapper(key, pd.queues().get(op).firstEntry().getValue()); - var newPd = pd.withQueues(pd.queues().plus(op, pd.queues().get(op).minus(ret.op().timestamp()))); + var ret = new JKleppmannTreeOpWrapper(entry.key(), pd.queues().get(entry.peer()).firstEntry().getValue()); + var newPd = pd.withQueues(pd.queues().plus(entry.peer(), pd.queues().get(entry.peer()).minus(ret.op().timestamp()))); curTx.put(newPd); - if (!newPd.queues().get(op).isEmpty()) - invalidationQueueService.pushInvalidationToOne(op, pd.key()); + if (!newPd.queues().get(entry.peer()).isEmpty()) + invalidationQueueService.pushInvalidationToOne(entry.peer(), pd.key()); return ret; } case null, @@ -51,6 +54,6 @@ if (info == null) { return; } - remoteObjectServiceClient.pushOps(op, List.of(info)); + remoteObjectServiceClient.pushOps(entry.peer(), List.of(info)); } } From 891b15a75ac110a367cd88bc9c7259a97f108019 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 17:25:42 +0100 Subject: [PATCH 069/105] somewhat... working... initial sync --- .../usatiuk/kleppmanntree/KleppmannTree.java | 19 +- .../dhfs/objects/CurrentTransaction.java | 9 + .../com/usatiuk/dhfs/objects/TxWriteback.java | 4 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 8 +- .../WritebackObjectPersistentStore.java | 14 +- .../dhfs/objects/transaction/Transaction.java | 5 + .../transaction/TransactionFactoryImpl.java | 12 ++ .../jkleppmanntree/JKleppmannTreeManager.java | 40 +--- .../dhfs/objects/repository/PeerManager.java | 5 + .../repository/PersistentPeerDataService.java | 26 ++- .../repository/PersistentRemoteHostsData.java | 9 +- .../dhfs/objects/repository/SyncHandler.java | 187 +----------------- .../invalidation/InvalidationQueueEntry.java | 4 +- .../repository/invalidation/OpPusher.java | 44 +++-- 14 files changed, 148 insertions(+), 238 deletions(-) diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index 10fd6004..29b3b142 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -90,8 +90,9 @@ public class KleppmannTree, PeerIdT ex } private void undoOp(LogRecord op) { - for (var e : op.effects().reversed()) - undoEffect(e); + if (op.effects() != null) + for (var e : op.effects().reversed()) + undoEffect(e); } private void redoOp(Map.Entry, LogRecord> entry) { @@ -343,11 +344,16 @@ public class KleppmannTree, PeerIdT ex var conflictNodeId = newParent.children().get(op.newMeta().getName()); if (conflictNodeId != null) { + var conflictNode = _storage.getById(conflictNodeId); + MetaT conflictNodeMeta = conflictNode.meta(); + + if (Objects.equals(conflictNodeMeta, op.newMeta())) { + return new LogRecord<>(op, null); + } + if (failCreatingIfExists) throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId); - var conflictNode = _storage.getById(conflictNodeId); - MetaT conflictNodeMeta = conflictNode.meta(); String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key(); String newOursName = op.newMeta().getName() + ".conflict." 
+ op.childId(); return new LogRecord<>(op, List.of( @@ -374,6 +380,11 @@ public class KleppmannTree, PeerIdT ex if (replaceNodeId != null) { var replaceNode = _storage.getById(replaceNodeId); var replaceNodeMeta = replaceNode.meta(); + + if (Objects.equals(replaceNodeMeta, op.newMeta())) { + return new LogRecord<>(op, null); + } + return new LogRecord<>(op, List.of( new LogEffect<>(new LogEffectOld<>(replaceNode.lastEffectiveOp(), newParentId, replaceNodeMeta), replaceNode.lastEffectiveOp(), _storage.getTrashId(), (MetaT) replaceNodeMeta.withName(replaceNodeId.toString()), replaceNodeId), new LogEffect<>(new LogEffectOld<>(node.lastEffectiveOp(), oldParentId, oldMeta), op, op.newParentId(), op.newMeta(), op.childId()) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index 089c97ec..71ac9fa4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -5,6 +5,9 @@ import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.List; import java.util.Optional; @ApplicationScoped @@ -37,6 +40,12 @@ public class CurrentTransaction implements Transaction { transactionManager.current().delete(key); } + @Nonnull + @Override + public Collection findAllObjects() { + return transactionManager.current().findAllObjects(); + } + @Override public void put(JData obj) { transactionManager.current().put(obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 6d73de04..27ac10e4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.dhfs.utils.VoidFn; - +import java.util.Collection; import java.util.Optional; public interface TxWriteback { @@ -14,6 +13,7 @@ public interface TxWriteback { void fence(long bundleId); Optional getPendingWrite(JObjectKey key); + Collection getPendingWrites(); // Executes callback after bundle with bundleId id has been persisted // if it was already, runs callback on the caller thread diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 2b7ba3c8..5f40d65d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -2,7 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.TxManifestObj; -import com.usatiuk.dhfs.utils.VoidFn; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; @@ -277,6 +276,13 @@ public class TxWritebackImpl implements TxWriteback { } } + @Override + public Collection getPendingWrites() { + synchronized (_pendingBundles) { + return Collections.unmodifiableCollection(_pendingWrites.values()); + } + } + @Override public void asyncFence(long 
bundleId, Runnable fn) { verifyReady(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index e126aff6..48f7265a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -8,6 +8,7 @@ import jakarta.inject.Inject; import javax.annotation.Nonnull; import java.util.Collection; +import java.util.HashSet; import java.util.Optional; import java.util.function.Consumer; @@ -19,8 +20,17 @@ public class WritebackObjectPersistentStore { TxWriteback txWriteback; @Nonnull - Collection findAllObjects() { - return delegate.findAllObjects(); + public Collection findAllObjects() { + var pending = txWriteback.getPendingWrites(); + var found = new HashSet<>(delegate.findAllObjects()); + for (var p : pending) { + switch (p) { + case TxWriteback.PendingWrite write -> found.add(write.data().data().key()); + case TxWriteback.PendingDelete deleted -> found.remove(deleted.key()); + default -> throw new IllegalStateException("Unexpected value: " + p); + } + } + return found; } @Nonnull diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 166aceb3..e78525a9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -3,6 +3,8 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; +import javax.annotation.Nonnull; +import java.util.Collection; import java.util.Optional; // The transaction interface actually used by user code to retrieve objects @@ -17,6 +19,9 @@ public interface Transaction extends TransactionHandle { void delete(JObjectKey key); + @Nonnull + Collection findAllObjects(); // FIXME: This is crap + default Optional get(Class type, JObjectKey key) { return get(type, key, LockingStrategy.OPTIMISTIC); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index da9da306..629d8a67 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -3,12 +3,18 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.WritebackObjectPersistentStore; import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import javax.annotation.Nonnull; import java.util.*; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { + @Inject + WritebackObjectPersistentStore store; // FIXME: + @Override public TransactionPrivate createTransaction(long id, TransactionObjectSource source) { return new TransactionImpl(id, source); @@ -98,6 +104,12 @@ public class TransactionFactoryImpl implements TransactionFactory { _newWrites.put(key, new 
TxRecord.TxObjectRecordDeleted(key)); } + @Nonnull + @Override + public Collection findAllObjects() { + return store.findAllObjects(); + } + @Override public void put(JData obj) { // get(JData.class, obj.getKey(), LockingStrategy.OPTIMISTIC); diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java index 25ef3949..43b9cf7e 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java @@ -115,12 +115,12 @@ public class JKleppmannTreeManager { throw new IllegalArgumentException("Committed op push was not the oldest"); _data = _data.withQueues(_data.queues().plus(host, _data.queues().get(host).minus(_data.queues().get(host).firstKey()))); + curTx.put(_data); } -// @Override -// public void pushBootstrap(PeerId host) { -// _tree.recordBoostrapFor(host); -// } + public void recordBootstrap(PeerId host) { + _tree.recordBoostrapFor(host); + } public Pair findParent(Function, Boolean> predicate) { return _tree.findParent(predicate); } @@ -207,42 +207,12 @@ public class JKleppmannTreeManager { for (var p : peerInfoService.getPeersNoSelf()) { recordOpForPeer(p.id(), op); } -// _persistentData.get().assertRwLock(); -// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -// var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); -// _persistentData.get().mutate(new JMutator() { -// @Override -// public boolean mutate(JKleppmannTreePersistentData object) { -// object.recordOp(hostUuds, op); -// return true; -// } -// -// @Override -// public void revert(JKleppmannTreePersistentData object) { -// object.removeOp(hostUuds, op); -// } -// }); -// opSender.push(JKleppmannTree.this); } @Override public void recordOpForPeer(PeerId peer, OpMove op) { _data = _data.withQueues(_data.queues().plus(peer, _data.queues().getOrDefault(peer, TreePMap.empty()).plus(op.timestamp(), op))); -// _persistentData.get().assertRwLock(); -// _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -// _persistentData.get().mutate(new JMutator() { -// @Override -// public boolean mutate(JKleppmannTreePersistentData object) { -// object.recordOp(peer, op); -// return true; -// } -// -// @Override -// public void revert(JKleppmannTreePersistentData object) { -// object.removeOp(peer, op); -// } -// }); -// opSender.push(JKleppmannTree.this); + curTx.put(_data); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java index c8a213c2..824d07b5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java @@ -103,6 +103,11 @@ public class PeerManager { private void handleConnectionSuccess(PeerInfo host, PeerAddress address) { boolean wasReachable = isReachable(host); + boolean shouldSync = persistentPeerDataService.markInitialSyncDone(host.id()); + + if (shouldSync) + syncHandler.doInitialSync(host.id()); + _states.put(host.id(), address); if (wasReachable) return;
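The five lines added to handleConnectionSuccess above are the heart of this patch: the first successful connection to a peer atomically flips a persisted "initial sync done" flag, and only the caller that wins that flip triggers doInitialSync. A rough sketch of the contract, with an in-memory concurrent set standing in for the transactional PersistentRemoteHostsData state introduced in the next hunks (class and method names here are illustrative):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class InitialSyncTracker {
    private final Set<String> initialSyncDone = ConcurrentHashMap.newKeySet();

    // Returns true exactly once per peer; the caller then pushes a full resync.
    boolean markInitialSyncDone(String peerId) {
        return initialSyncDone.add(peerId);
    }

    // Undoes the mark, so the next connection resyncs again.
    boolean resetInitialSyncDone(String peerId) {
        return initialSyncDone.remove(peerId);
    }
}

public class ConnectionHandlerDemo {
    public static void main(String[] args) {
        var tracker = new InitialSyncTracker();
        if (tracker.markInitialSyncDone("peer-1"))
            System.out.println("doInitialSync(peer-1)"); // first contact: full resync
        if (!tracker.markInitialSyncDone("peer-1"))
            System.out.println("already synced, skipping"); // reconnects are no-ops
    }
}

The real markInitialSyncDone below runs inside a transaction against a persistent set, so the "exactly once" decision survives restarts; resetInitialSyncDone is presumably the hook for giving a removed and re-added peer a fresh resync.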
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 2f359a82..4a48e9cd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -13,6 +13,7 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.HashTreePSet; import java.io.IOException; import java.security.KeyPair; import java.security.cert.CertificateEncodingException; @@ -63,7 +64,7 @@ public class PersistentPeerDataService { _selfKeyPair = CertificateTools.generateKeyPair(); _selfCertificate = CertificateTools.generateCertificate(_selfKeyPair, _selfUuid.toString()); - curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair)); + curTx.put(new PersistentRemoteHostsData(_selfUuid, _selfCertificate, _selfKeyPair, HashTreePSet.empty())); peerInfoService.putPeer(_selfUuid, _selfCertificate.getEncoded()); } catch (CertificateEncodingException e) { throw new RuntimeException(e); @@ -116,5 +117,28 @@ public class PersistentPeerDataService { return _selfCertificate; } + // Returns true if host's initial sync wasn't done before, and marks it as done + public boolean markInitialSyncDone(PeerId peerId) { + return txm.run(() -> { + var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (data == null) throw new IllegalStateException("Self data not found"); + boolean exists = data.initialSyncDone().contains(peerId); + if (exists) return false; + curTx.put(data.withInitialSyncDone(data.initialSyncDone().plus(peerId))); + return true; + }); + } + + // Returns true if it was marked as done before, and resets it + public boolean resetInitialSyncDone(PeerId peerId) { + return txm.run(() -> { + var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null); + if (data == null) throw new IllegalStateException("Self data not found"); + boolean exists = data.initialSyncDone().contains(peerId); + if (!exists) return false; + curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId))); + return true; + }); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java index c72f5331..4f4343f1 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; +import org.pcollections.PSet; import java.io.Serializable; import java.security.KeyPair; @@ -10,11 +11,17 @@ import java.security.cert.X509Certificate; public record PersistentRemoteHostsData(PeerId selfUuid, X509Certificate selfCertificate, - KeyPair selfKeyPair) implements JData, Serializable { + KeyPair selfKeyPair, + PSet initialSyncDone) implements JData, Serializable { public static final JObjectKey KEY = JObjectKey.of("self_peer_data"); @Override public JObjectKey key() { return KEY; } + + + public PersistentRemoteHostsData withInitialSyncDone(PSet initialSyncDone) { + return 
new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone); + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java index ae91198f..e1bdeff1 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects.repository; import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; @@ -10,191 +11,17 @@ import org.pcollections.PMap; import javax.annotation.Nullable; -// - -//import com.usatiuk.autoprotomap.runtime.ProtoSerializer; - -//import com.usatiuk.dhfs.objects.jrepository.JObject; - -//import com.usatiuk.dhfs.objects.jrepository.JObjectData; - -//import com.usatiuk.dhfs.objects.jrepository.JObjectManager; - -//import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; - -//import com.usatiuk.dhfs.objects.persistence.JObjectDataP; - -//import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; - -//import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -//import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -//import io.grpc.Status; -//import io.quarkus.logging.Log; -//import jakarta.enterprise.context.ApplicationScoped; -//import jakarta.enterprise.inject.Instance; -//import jakarta.inject.Inject; -// -//import java.util.HashMap; -//import java.util.Objects; -//import java.util.Optional; -//import java.util.UUID; -//import java.util.concurrent.atomic.AtomicReference; -//import java.util.stream.Collectors; -//import java.util.stream.Stream; -// @ApplicationScoped public class SyncHandler { @Inject Transaction curTx; @Inject PersistentPeerDataService persistentPeerDataService; -// @Inject -// JObjectManager jObjectManager; -// @Inject -// PeerManager peerManager; -// @Inject -// RemoteObjectServiceClient remoteObjectServiceClient; -// @Inject -// InvalidationQueueService invalidationQueueService; -// @Inject -// Instance conflictResolvers; -// @Inject -// PersistentPeerDataService persistentPeerDataService; -// @Inject -// ProtoSerializer dataProtoSerializer; -// @Inject -// OpObjectRegistry opObjectRegistry; -// @Inject -// JObjectTxManager jObjectTxManager; -// -// public void pushInitialResyncObj(UUID host) { -// Log.info("Doing initial object push for " + host); -// -// var objs = jObjectManager.findAll(); -// -// for (var obj : objs) { -// Log.trace("IS: " + obj + " to " + host); -// invalidationQueueService.pushInvalidationToOne(host, obj); -// } -// } -// -// public void pushInitialResyncOp(UUID host) { -// Log.info("Doing initial op push for " + host); -// -// jObjectTxManager.executeTxAndFlush( -// () -> { -// opObjectRegistry.pushBootstrapData(host); -// } -// ); -// } -// + @Inject + TransactionManager txm; + @Inject + InvalidationQueueService invalidationQueueService; -// public RemoteObjectMeta handleOneUpdate(PeerId from, RemoteObjectMeta current, PMap rcvChangelog) { -//// if (!rcv.key().equals(current.key())) { -//// Log.error("Received update for different object: " + rcv.key() + " from " + from); -//// throw new IllegalArgumentException("Received update for different object: " + rcv.key() + 
" from " + from); -//// } -// -// var receivedTotalVer = rcvChangelog.values().stream().mapToLong(Long::longValue).sum(); -// -// if (current.meta().knownRemoteVersions().getOrDefault(from, 0L) > receivedTotalVer) { -// Log.error("Received older index update than was known for host: " + from + " " + current.key()); -// throw new IllegalStateException(); // FIXME: OutdatedUpdateException -// } -// -// Log.trace("Handling update: " + current.key() + " from " + from + "\n" + "ours: " + current + " \n" + "received: " + rcvChangelog); -// -// boolean conflict = false; -// boolean updatedRemoteVersion = false; -// -// var newObj = current; -// var curKnownRemoteVersion = current.meta().knownRemoteVersions().get(from); -// -// if (curKnownRemoteVersion == null || !curKnownRemoteVersion.equals(receivedTotalVer)) -// updatedRemoteVersion = true; -// -// if (updatedRemoteVersion) -// newObj = current.withMeta(current.meta().withKnownRemoteVersions( -// current.meta().knownRemoteVersions().plus(from, receivedTotalVer) -// )); -// -// -// boolean hasLower = false; -// boolean hasHigher = false; -// for (var e : Stream.concat(current.meta().changelog().keySet().stream(), rcvChangelog.keySet().stream()).collect(Collectors.toUnmodifiableSet())) { -// if (rcvChangelog.getOrDefault(e, 0L) < current.meta().changelog().getOrDefault(e, 0L)) -// hasLower = true; -// if (rcvChangelog.getOrDefault(e, 0L) > current.meta().changelog().getOrDefault(e, 0L)) -// hasHigher = true; -// } -// -// if (hasLower && hasHigher) { -// Log.info("Conflict on update (inconsistent version): " + current.key() + " from " + from); -//// Log. -//// -//// info("Trying conflict resolution: " + header.getName() + " from " + from); -//// var found = foundExt.get(); -//// -//// JObjectData theirsData; -//// ObjectHeader theirsHeader; -//// if (header. hasPushedData()) { -//// theirsHeader = header; -//// theirsData = dataProtoSerializer. -//// -//// deserialize(header.getPushedData()); -//// } else { -//// var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); -//// theirsData = dataProtoSerializer. -//// -//// deserialize(got.getRight()); -//// theirsHeader = got. -//// -//// getLeft(); -//// } -//// -//// jObjectTxManager. -//// -//// executeTx(() -> { -//// var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { -//// if (d == null) -//// throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); -//// return d.getConflictResolver(); -//// }); -//// var resolver = conflictResolvers.select(resolverClass); -//// resolver. -//// -//// get(). -//// -//// resolve(from, theirsHeader, theirsData, found); -//// }); -//// Log. info("Resolved conflict for " + from + " " + header.getName()); -//// throw new NotImplementedException(); -// } else if (hasLower) { -// Log.info("Received older index update than known: " + from + " " + current.key()); -//// throw new OutdatedUpdateException(); -//// throw new NotImplementedException(); -// } else if (hasHigher) { -// var newChangelog = rcvChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ? 
-// rcvChangelog : rcvChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L); -// -// newObj = newObj.withData(null).withMeta(newObj.meta().withChangelog(newChangelog)); -//// if (header.hasPushedData()) -//// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); -// } -//// else if (data == null && header.hasPushedData()) { -//// found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); -//// if (found.getData() == null) -//// found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); -//// } -// - - /// / assert Objects.equals(receivedTotalVer, md.getOurVersion()); -// -// if (!updatedRemoteVersion) -// Log.debug("No action on update: " + current.meta().key() + " from " + from); -// -// return newObj; -// } public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap receivedChangelog, @Nullable JDataRemote receivedData) { var current = curTx.get(RemoteObjectMeta.class, key).orElse(null); if (current == null) { @@ -250,6 +77,8 @@ public class SyncHandler { } public void doInitialSync(PeerId peer) { - //TODO: + txm.run(() -> { + for (var cur : curTx.findAllObjects()) invalidationQueueService.pushInvalidationToOne(peer, cur, true); + }); } } \ No newline at end of file diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java index deae409b..04ea3853 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueEntry.java @@ -3,5 +3,7 @@ package com.usatiuk.dhfs.objects.repository.invalidation; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; -public record InvalidationQueueEntry(PeerId peer, JObjectKey key, boolean forced) { +import java.io.Serializable; + +public record InvalidationQueueEntry(PeerId peer, JObjectKey key, boolean forced) implements Serializable { } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java index 330b70da..d1b39846 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/OpPusher.java @@ -4,7 +4,7 @@ import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.RemoteObjectMeta; import com.usatiuk.dhfs.objects.RemoteTransaction; import com.usatiuk.dhfs.objects.TransactionManager; -import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; +import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData; import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; import com.usatiuk.dhfs.objects.transaction.Transaction; @@ -25,25 +25,30 @@ public class OpPusher { RemoteObjectServiceClient remoteObjectServiceClient; @Inject InvalidationQueueService invalidationQueueService; + @Inject + JKleppmannTreeManager jKleppmannTreeManager; public void doPush(InvalidationQueueEntry entry) { - Op info = txm.run(() -> { + List info = txm.run(() -> { var obj = curTx.get(JData.class, entry.key()).orElse(null); switch (obj) { case 
RemoteObjectMeta remote -> { - return new IndexUpdateOp(entry.key(), remote.changelog()); + return List.of(new IndexUpdateOp(entry.key(), remote.changelog())); } case JKleppmannTreePersistentData pd -> { - var maybeQueue = pd.queues().get(entry.peer()); - if (maybeQueue == null || maybeQueue.isEmpty()) { + var tree = jKleppmannTreeManager.getTree(pd.key()); + if (entry.forced()) + tree.recordBootstrap(entry.peer()); + + if (!tree.hasPendingOpsForHost(entry.peer())) return null; - } - var ret = new JKleppmannTreeOpWrapper(entry.key(), pd.queues().get(entry.peer()).firstEntry().getValue()); - var newPd = pd.withQueues(pd.queues().plus(entry.peer(), pd.queues().get(entry.peer()).minus(ret.op().timestamp()))); - curTx.put(newPd); - if (!newPd.queues().get(entry.peer()).isEmpty()) + + var ops = tree.getPendingOpsForHost(entry.peer(), 1); + + if (tree.hasPendingOpsForHost(entry.peer())) invalidationQueueService.pushInvalidationToOne(entry.peer(), pd.key()); - return ret; + + return ops; } case null, default -> { @@ -54,6 +59,21 @@ public class OpPusher { if (info == null) { return; } - remoteObjectServiceClient.pushOps(entry.peer(), List.of(info)); + remoteObjectServiceClient.pushOps(entry.peer(), info); + txm.run(() -> { + var obj = curTx.get(JData.class, entry.key()).orElse(null); + switch (obj) { + case JKleppmannTreePersistentData pd: { + var tree = jKleppmannTreeManager.getTree(pd.key()); + for (var op : info) { + tree.commitOpForHost(entry.peer(), op); + } + break; + } + case null: + default: + } + }); + } } From b0e43ad7d2bf5ced9b63dd35cc2029d8c5cfcdef Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 17:28:15 +0100 Subject: [PATCH 070/105] move failCreatingIfExists check to a correct place --- .../main/java/com/usatiuk/kleppmanntree/KleppmannTree.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java index 29b3b142..a84091a8 100644 --- a/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java +++ b/dhfs-parent/kleppmanntree/src/main/java/com/usatiuk/kleppmanntree/KleppmannTree.java @@ -344,6 +344,9 @@ public class KleppmannTree, PeerIdT ex var conflictNodeId = newParent.children().get(op.newMeta().getName()); if (conflictNodeId != null) { + if (failCreatingIfExists) + throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId); + var conflictNode = _storage.getById(conflictNodeId); MetaT conflictNodeMeta = conflictNode.meta(); @@ -351,9 +354,6 @@ public class KleppmannTree, PeerIdT ex return new LogRecord<>(op, null); } - if (failCreatingIfExists) - throw new AlreadyExistsException("Already exists: " + op.newMeta().getName() + ": " + conflictNodeId); - String newConflictNodeName = conflictNodeMeta.getName() + ".conflict." + conflictNode.key(); String newOursName = op.newMeta().getName() + ".conflict." 
+ op.childId(); return new LogRecord<>(op, List.of( From 37fe39d99e2f43fcfc985fe901fe476d106148d8 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 17:49:34 +0100 Subject: [PATCH 071/105] make some tests work --- .../objects/repository/PersistentPeerDataService.java | 8 ++++++++ .../repository/peerdiscovery/PeerDiscoveryDirectory.java | 2 ++ .../local/LocalPeerDiscoveryBroadcaster.java | 2 ++ .../peerdiscovery/local/LocalPeerDiscoveryClient.java | 2 +- .../server/src/main/resources/application.properties | 1 + .../java/com/usatiuk/dhfs/integration/DhfsFuseIT.java | 4 ++-- .../test/java/com/usatiuk/dhfs/integration/DhfsImage.java | 2 +- 7 files changed, 17 insertions(+), 4 deletions(-) diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java index 4a48e9cd..cf3b3de3 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java @@ -15,7 +15,11 @@ import jakarta.inject.Inject; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.pcollections.HashTreePSet; +import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.security.KeyPair; import java.security.cert.CertificateEncodingException; import java.security.cert.X509Certificate; @@ -44,6 +48,8 @@ public class PersistentPeerDataService { @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid") Optional presetUuid; + @ConfigProperty(name = "dhfs.objects.persistence.stuff.root") + String stuffRoot; private PeerId _selfUuid; private X509Certificate _selfCertificate; @@ -73,6 +79,8 @@ public class PersistentPeerDataService { }); peerTrustManager.reloadTrustManagerHosts(peerInfoService.getPeers()); Log.info("Self uuid is: " + _selfUuid.toString()); + new File(stuffRoot).mkdirs(); + Files.write(Path.of(stuffRoot, "self_uuid"), _selfUuid.id().toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); } // private void pushPeerUpdates() { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java index 50523880..1021add5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/PeerDiscoveryDirectory.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects.repository.peerdiscovery; import com.usatiuk.dhfs.objects.PeerId; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import org.apache.commons.collections4.MultiValuedMap; import org.apache.commons.collections4.multimap.HashSetValuedHashMap; @@ -37,6 +38,7 @@ public class PeerDiscoveryDirectory { private final MultiValuedMap _entries = new HashSetValuedHashMap<>(); public void notifyAddr(PeerAddress addr) { + Log.tracev("New address {0}", addr); synchronized (_entries) { var peer = addr.peer(); _entries.removeMapping(peer, new PeerEntry(addr, 0));
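The new trace lines ("New address", "Broadcast sent to: ...") are what the updated integration tests below wait for. The mechanism they instrument is plain UDP broadcast discovery: the broadcaster periodically sends an announcement to each interface's broadcast address, and the client listens and feeds every sender into PeerDiscoveryDirectory.notifyAddr. A self-contained toy version of that loop; the real payload is a protobuf PeerDiscoveryInfo rather than a string, and the port here is an arbitrary stand-in:

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;

public class DiscoveryDemo {
    public static void main(String[] args) throws Exception {
        int port = 42424; // assumption: any free UDP port works for the demo

        Thread listener = new Thread(() -> {
            try (DatagramSocket socket = new DatagramSocket(port)) {
                byte[] buf = new byte[1500];
                DatagramPacket packet = new DatagramPacket(buf, buf.length);
                socket.receive(packet); // blocks until one announcement arrives
                System.out.println("Got peer discovery packet from " + packet.getAddress());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        listener.start();

        try (DatagramSocket socket = new DatagramSocket()) {
            socket.setBroadcast(true);
            byte[] payload = "peer-uuid-here".getBytes(StandardCharsets.UTF_8);
            // 255.255.255.255 reaches the local segment; the real broadcaster also
            // walks every interface's specific broadcast address.
            socket.send(new DatagramPacket(payload, payload.length,
                    InetAddress.getByName("255.255.255.255"), port));
        }
        listener.join();
    }
}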
diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java index d3f77471..7b8362f5 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryBroadcaster.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects.repository.peerdiscovery.local; import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; import com.usatiuk.dhfs.objects.repository.peerdiscovery.PeerDiscoveryInfo; import io.quarkus.arc.properties.IfBuildProperty; +import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.Startup; import io.quarkus.scheduler.Scheduled; @@ -90,6 +91,7 @@ public class LocalPeerDiscoveryBroadcaster { try { sendPacket = new DatagramPacket(sendBytes, sendBytes.length, broadcast, broadcastPort); _socket.send(sendPacket); + Log.tracev("Broadcast sent to: {0}, at: {1}", broadcast.getHostAddress(), networkInterface.getDisplayName()); } catch (Exception ignored) { continue; } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java index 9b505307..ce412b6b 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/local/LocalPeerDiscoveryClient.java @@ -18,7 +18,6 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import java.net.*; import java.nio.ByteBuffer; -import java.util.UUID; @ApplicationScoped @IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true") @@ -70,6 +69,7 @@ public class LocalPeerDiscoveryClient { try { var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength())); + Log.tracev("Got peer discovery packet from {0}", packet.getAddress()); peerDiscoveryDirectory.notifyAddr( new IpPeerAddress( PeerId.of(got.getUuid()), diff --git a/dhfs-parent/server/src/main/resources/application.properties b/dhfs-parent/server/src/main/resources/application.properties index 220ba49b..4f499e77 100644 --- a/dhfs-parent/server/src/main/resources/application.properties +++ b/dhfs-parent/server/src/main/resources/application.properties @@ -10,6 +10,7 @@ dhfs.objects.reconnect_interval=5s dhfs.objects.write_log=false dhfs.objects.periodic-push-op-interval=5m dhfs.fuse.root=${HOME}/dhfs_default/fuse +dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff dhfs.fuse.debug=false dhfs.fuse.enabled=true dhfs.files.allow_recursive_delete=false diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java index b9d9f92d..bdb70a8e 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java @@ -59,8 +59,8 @@ public class DhfsFuseIT { Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - 
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS); var c1curl = container1.execInContainer("/bin/sh", "-c", "curl --header \"Content-Type: application/json\" " + diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java index 12d30e28..1d08d76e 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java @@ -69,7 +69,7 @@ public class DhfsImage implements Future { .cmd("java", "-ea", "-Xmx128M", "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED", "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED", - "-Ddhfs.objects.peerdiscovery.interval=100", + "-Ddhfs.objects.peerdiscovery.interval=1s", "-Ddhfs.objects.invalidation.delay=100", "-Ddhfs.objects.deletion.delay=0", "-Ddhfs.objects.deletion.can-delete-retry-delay=1000", From 080032c3e9d3bfd746281ace8b1b6be9702d55ce Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Fri, 21 Feb 2025 17:52:39 +0100 Subject: [PATCH 072/105] ci updates --- .github/workflows/server.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml index b4738b5f..efef968f 100644 --- a/.github/workflows/server.yml +++ b/.github/workflows/server.yml @@ -54,12 +54,12 @@ jobs: # - name: Build with Maven # run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: DHFS Server Package path: dhfs-parent/server/target/quarkus-app - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: Test logs @@ -84,7 +84,7 @@ jobs: - name: NPM Build run: cd webui && npm run build - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: Webui path: webui/dist @@ -155,7 +155,7 @@ jobs: CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result" - name: Upload build - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }} path: result @@ -168,7 +168,7 @@ jobs: uses: actions/checkout@v4 - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: downloaded-libs @@ -180,7 +180,7 @@ jobs: test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1 - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: NativeLibs path: result @@ -201,19 +201,19 @@ jobs: uses: actions/checkout@v4 - name: Download server package - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: DHFS Server Package path: dhfs-package-downloaded - name: Download webui - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Webui path: webui-dist-downloaded - name: Download native libs - uses: 
actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: NativeLibs path: dhfs-native-downloaded @@ -299,17 +299,17 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: DHFS Server Package path: dhfs-package-downloaded - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: Webui path: webui-dist-downloaded - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: NativeLibs path: dhfs-native-downloaded @@ -339,7 +339,7 @@ jobs: run: tar -cvf ~/run-wrapper.tar.gz ./run-wrapper-out - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Run wrapper path: ~/run-wrapper.tar.gz From 5b3e55d1bbbbb728a8883a73080dcfc5dc6425e2 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 10:17:58 +0100 Subject: [PATCH 073/105] DhfsFusex3IT fix new address waiter --- .../java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java index b401b053..089cee37 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java @@ -84,9 +84,9 @@ public class DhfsFusex3IT { Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid)); - waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2); + waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS, 2); var c1curl = container1.execInContainer("/bin/sh", "-c", "curl --header \"Content-Type: application/json\" " + From a461dd6b80e2fe0c224bd4f4c9fe2c113954e74a Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 12:26:38 +0100 Subject: [PATCH 074/105] objects: some basic iterator structure --- dhfs-parent/objects/pom.xml | 5 + .../dhfs/objects/CloseableKvIterator.java | 10 + .../dhfs/objects/JDataVersionedWrapper.java | 6 +- .../com/usatiuk/dhfs/objects/JObjectKey.java | 9 + .../usatiuk/dhfs/objects/JObjectManager.java | 20 +- .../dhfs/objects/MergingKvIterator.java | 73 +++++ .../dhfs/objects/NavigableMapKvIterator.java | 63 ++++ .../dhfs/objects/PredicateKvIterator.java | 56 ++++ .../com/usatiuk/dhfs/objects/TxBundle.java | 2 +- .../com/usatiuk/dhfs/objects/TxWriteback.java | 11 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 38 ++- .../WritebackObjectPersistentStore.java | 15 +- .../CachingObjectPersistentStore.java | 34 +- .../FileObjectPersistentStore.java | 308 ------------------ .../objects/persistence/IteratorStart.java | 8 + .../LmdbObjectPersistentStore.java | 201 ++++++++++++ .../MemoryObjectPersistentStore.java | 12 +- 
.../persistence/ObjectPersistentStore.java | 9 + .../SerializingObjectPersistentStore.java | 53 ++- .../transaction/TransactionFactoryImpl.java | 4 +- .../transaction/TransactionObject.java | 2 +- .../src/main/resources/application.properties | 2 +- 22 files changed, 579 insertions(+), 362 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 66fe78ff..8f4c33ff 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -64,6 +64,11 @@ quarkus-junit5-mockito test + + org.lmdbjava + lmdbjava + 0.9.1 + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java new file mode 100644 index 00000000..82227750 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Iterator; + +public interface CloseableKvIterator, V> extends Iterator>, AutoCloseableNoThrow { + K peekNextKey(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java index b71ac8b9..facba141 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java @@ -4,8 +4,8 @@ import jakarta.annotation.Nonnull; import java.io.Serializable; -public record JDataVersionedWrapper(@Nonnull T data, long version) implements Serializable { - public JDataVersionedWrapper withVersion(long version) { - return new JDataVersionedWrapper<>(data, version); +public record JDataVersionedWrapper(@Nonnull JData data, long version) implements Serializable { + public JDataVersionedWrapper withVersion(long version) { + return new JDataVersionedWrapper(data, version); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 67e368ac..26e5b347 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import java.io.Serializable; +import java.nio.charset.StandardCharsets; public record JObjectKey(String name) implements Serializable, Comparable { public static JObjectKey of(String name) { @@ -16,4 +17,12 @@ public record JObjectKey(String name) implements Serializable, Comparable 
JDataVersionedWrapper get(Class type, JObjectKey key) { + private JDataVersionedWrapper get(Class type, JObjectKey key) { verifyReady(); while (true) { { @@ -63,26 +63,24 @@ public class JObjectManager { if (ref == null) { _objects.remove(key, got); } else if (type.isInstance(ref.data())) { - return (JDataVersionedWrapper) ref; + return (JDataVersionedWrapper) ref; } else { throw new IllegalArgumentException("Object type mismatch: " + ref.data().getClass() + " vs " + type); } } } - //noinspection unused try (var readLock = _objLocker.lock(key)) { if (_objects.containsKey(key)) continue; - var read = writebackObjectPersistentStore.readObject(key).orElse(null); if (read == null) return null; if (type.isInstance(read.data())) { - var wrapper = new JDataWrapper<>((JDataVersionedWrapper) read); + var wrapper = new JDataWrapper<>((JDataVersionedWrapper) read); var old = _objects.put(key, wrapper); assert old == null; - return (JDataVersionedWrapper) read; + return (JDataVersionedWrapper) read; } else { throw new IllegalArgumentException("Object type mismatch: " + read.getClass() + " vs " + type); } @@ -229,7 +227,7 @@ public class JObjectManager { switch (action.getValue()) { case TxRecord.TxObjectRecordWrite write -> { Log.trace("Writing " + action.getKey()); - var wrapped = new JDataVersionedWrapper<>(write.data(), tx.getId()); + var wrapped = new JDataVersionedWrapper(write.data(), tx.getId()); _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); } case TxRecord.TxObjectRecordDeleted deleted -> { @@ -285,19 +283,19 @@ public class JObjectManager { } private record TransactionObjectNoLock - (Optional> data) + (Optional data) implements TransactionObject { } private record TransactionObjectLocked - (Optional> data, AutoCloseableNoThrow lock) + (Optional data, AutoCloseableNoThrow lock) implements TransactionObject { } - private class JDataWrapper extends WeakReference> { + private class JDataWrapper extends WeakReference { private static final Cleaner CLEANER = Cleaner.create(); - public JDataWrapper(JDataVersionedWrapper referent) { + public JDataWrapper(JDataVersionedWrapper referent) { super(referent); var key = referent.data().key(); CLEANER.register(referent, () -> { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java new file mode 100644 index 00000000..93305583 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -0,0 +1,73 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +import java.util.List; +import java.util.NoSuchElementException; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Stream; + +public class MergingKvIterator, V> implements CloseableKvIterator { + private final List> _iterators; + private final SortedMap> _sortedIterators = new TreeMap<>(); + + public MergingKvIterator(List> iterators) { + _iterators = iterators; + + for (CloseableKvIterator iterator : iterators) { + if (!iterator.hasNext()) { + continue; + } + K key = iterator.peekNextKey(); + if (key != null) { + _sortedIterators.put(key, iterator); + } + } + } + + @SafeVarargs + public MergingKvIterator(CloseableKvIterator... iterators) { + this(List.of(iterators)); + } + + @SafeVarargs + public MergingKvIterator(MergingKvIterator parent, CloseableKvIterator... 
iterators) { + this(Stream.concat(parent._iterators.stream(), Stream.of(iterators)).toList()); + } + + @Override + public K peekNextKey() { + var cur = _sortedIterators.pollFirstEntry(); + if (cur == null) { + throw new NoSuchElementException(); + } + return cur.getKey(); + } + + @Override + public void close() { + for (CloseableKvIterator iterator : _iterators) { + iterator.close(); + } + } + + @Override + public boolean hasNext() { + return !_sortedIterators.isEmpty(); + } + + @Override + public Pair next() { + var cur = _sortedIterators.pollFirstEntry(); + if (cur == null) { + throw new NoSuchElementException(); + } + var curVal = cur.getValue().next(); + if (cur.getValue().hasNext()) { + var nextKey = cur.getValue().peekNextKey(); + _sortedIterators.put(nextKey, cur.getValue()); + } + return curVal; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java new file mode 100644 index 00000000..ac224347 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java @@ -0,0 +1,63 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; + +public class NavigableMapKvIterator, V> implements CloseableKvIterator { + private final Iterator> _iterator; + private Map.Entry _next; + + public NavigableMapKvIterator(NavigableMap map, IteratorStart start, K key) { + SortedMap _view; + switch (start) { + case GE -> _view = map.tailMap(key, true); + case GT -> _view = map.tailMap(key, false); + case LE -> { + var tail = map.tailMap(key, true); + if (tail.firstKey().equals(key)) _view = tail; + else _view = map.tailMap(map.lowerKey(key), true); + } + case LT -> _view = map.tailMap(map.lowerKey(key), true); + default -> throw new IllegalArgumentException("Unknown start type"); + } + _iterator = _view.entrySet().iterator(); + fillNext(); + } + + private void fillNext() { + while (_iterator.hasNext() && _next == null) { + _next = _iterator.next(); + } + } + + @Override + public K peekNextKey() { + if (_next == null) { + throw new NoSuchElementException(); + } + return _next.getKey(); + } + + @Override + public void close() { + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair next() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; + _next = null; + fillNext(); + return Pair.of(ret); + } + +}
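MergingKvIterator keeps its source iterators in a TreeMap ordered by each source's next key and always polls the smallest, re-inserting the source under its new key after each step; NavigableMapKvIterator adapts any in-memory sorted map to the same interface. A usage sketch, assuming the generic signatures stripped from this dump are MergingKvIterator<K extends Comparable<K>, V> and NavigableMapKvIterator<K extends Comparable<K>, V>:

import com.usatiuk.dhfs.objects.MergingKvIterator;
import com.usatiuk.dhfs.objects.NavigableMapKvIterator;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import org.apache.commons.lang3.tuple.Pair;

import java.util.TreeMap;

public class MergeDemo {
    public static void main(String[] args) {
        var persisted = new TreeMap<String, String>();
        persisted.put("a", "disk-a");
        persisted.put("c", "disk-c");
        var pending = new TreeMap<String, String>();
        pending.put("b", "pending-b");

        // Merge the two sorted sources into one ordered stream of pairs:
        var it = new MergingKvIterator<>(
                new NavigableMapKvIterator<>(persisted, IteratorStart.GE, "a"),
                new NavigableMapKvIterator<>(pending, IteratorStart.GE, "a"));
        while (it.hasNext()) {
            Pair<String, String> kv = it.next();
            System.out.println(kv.getKey() + " -> " + kv.getValue()); // a, b, c in key order
        }
        it.close();
    }
}

Note that the constructor above keys sources by their first key, so two sources whose next keys collide would shadow each other in this early version; the keys in the sketch are deliberately disjoint.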
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java new file mode 100644 index 00000000..4f2651c4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -0,0 +1,56 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Function; + +public class PredicateKvIterator, V, V_T> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + private final Function _transformer; + private Pair _next; + + public PredicateKvIterator(CloseableKvIterator backing, Function transformer) { + _backing = backing; + _transformer = transformer; + fillNext(); + } + + private void fillNext() { + while (_backing.hasNext() && _next == null) { + var next = _backing.next(); + var transformed = _transformer.apply(next.getValue()); + if (transformed == null) + continue; + _next = Pair.of(next.getKey(), transformed); + } + } + + @Override + public K peekNextKey() { + if (_next == null) + throw new NoSuchElementException(); + return _next.getKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair next() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var ret = _next; + _next = null; + fillNext(); + return ret; + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java index a31dc61d..8068e262 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java @@ -3,7 +3,7 @@ package com.usatiuk.dhfs.objects; public interface TxBundle { long getId(); - void commit(JDataVersionedWrapper obj); + void commit(JDataVersionedWrapper obj); void delete(JObjectKey obj); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 27ac10e4..8723d4ee 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -1,5 +1,7 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; + import java.util.Collection; import java.util.Optional; @@ -13,6 +15,7 @@ public interface TxWriteback { void fence(long bundleId); Optional getPendingWrite(JObjectKey key); + Collection getPendingWrites(); // Executes callback after bundle with bundleId id has been persisted @@ -23,9 +26,15 @@ public interface TxWriteback { long bundleId(); } - record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { + record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { } record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { } + + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + + default CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 5f40d65d..4c6676c8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.persistence.TxManifestObj; import io.quarkus.logging.Log; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; @@ -14,7 +15,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import java.util.*; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -24,7 +25,7 @@ import 
java.util.concurrent.atomic.AtomicLong; public class TxWritebackImpl implements TxWriteback { private final LinkedList _pendingBundles = new LinkedList<>(); - private final ConcurrentHashMap _pendingWrites = new ConcurrentHashMap<>(); + private final ConcurrentSkipListMap _pendingWrites = new ConcurrentSkipListMap<>(); private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); private final Object _flushWaitSynchronizer = new Object(); @@ -37,7 +38,6 @@ public class TxWritebackImpl implements TxWriteback { long sizeLimit; private long currentSize = 0; private ExecutorService _writebackExecutor; - private ExecutorService _commitExecutor; private ExecutorService _statusExecutor; private volatile boolean _ready = false; @@ -51,21 +51,13 @@ public class TxWritebackImpl implements TxWriteback { _writebackExecutor.submit(this::writeback); } - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("writeback-commit-%d") - .build(); - - _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } _statusExecutor = Executors.newSingleThreadExecutor(); _statusExecutor.submit(() -> { try { while (true) { Thread.sleep(1000); if (currentSize > 0) - Log.info("Tx commit status: size=" - + currentSize / 1024 / 1024 + "MB"); + Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB"); } } catch (InterruptedException ignored) { } @@ -111,12 +103,12 @@ public class TxWritebackImpl implements TxWriteback { } } - var toWrite = new ArrayList>>(); + var toWrite = new ArrayList>(); var toDelete = new ArrayList(); for (var e : bundle._entries.values()) { switch (e) { - case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { + case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { Log.trace("Writing new " + key); toWrite.add(Pair.of(key, data)); } @@ -336,7 +328,7 @@ public class TxWritebackImpl implements TxWriteback { } @Override - public void commit(JDataVersionedWrapper obj) { + public void commit(JDataVersionedWrapper obj) { synchronized (_entries) { _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize())); } @@ -371,7 +363,7 @@ public class TxWritebackImpl implements TxWriteback { int size(); } - private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) + private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) implements BundleEntry { } @@ -383,4 +375,18 @@ public class TxWritebackImpl implements TxWriteback { } } } + + // Returns an iterator with a view of all committed objects + // Does not have to guarantee a consistent view; snapshots are handled by upper layers + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new PredicateKvIterator<>( + new NavigableMapKvIterator<>(_pendingWrites, start, key), + e -> { + if (e instanceof PendingWrite pw) { + return pw.data(); + } + return null; + }); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 48f7265a..4fd115c0 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.TxRecord; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; @@ -34,7 +35,7 @@ public class WritebackObjectPersistentStore { } @Nonnull - Optional> readObject(JObjectKey name) { + Optional readObject(JObjectKey name) { var pending = txWriteback.getPendingWrite(name).orElse(null); return switch (pending) { case TxWriteback.PendingWrite write -> Optional.of(write.data()); @@ -51,7 +52,7 @@ public class WritebackObjectPersistentStore { switch (action) { case TxRecord.TxObjectRecordWrite write -> { Log.trace("Flushing object " + write.key()); - bundle.commit(new JDataVersionedWrapper<>(write.data(), id)); + bundle.commit(new JDataVersionedWrapper(write.data(), id)); } case TxRecord.TxObjectRecordDeleted deleted -> { Log.trace("Deleting object " + deleted.key()); @@ -74,4 +75,14 @@ public class WritebackObjectPersistentStore { return r -> txWriteback.asyncFence(bundleId, r); } + + // Returns an iterator with a view of all committed objects + // Does not have to guarantee a consistent view; snapshots are handled by upper layers + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new MergingKvIterator<>(delegate.getIterator(start, key), txWriteback.getIterator(start, key)); + } + + public CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 127cdde7..60afe37a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; -import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.utils.DataLocker; import io.quarkus.logging.Log; import io.quarkus.runtime.Startup; @@ -14,6 +13,7 @@ import javax.annotation.Nonnull; import java.util.Collection; import java.util.LinkedHashMap; import java.util.Optional; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Stream; @@ -21,6 +21,7 @@ import java.util.stream.Stream; @ApplicationScoped public class CachingObjectPersistentStore { private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); + private final ConcurrentSkipListMap _sortedCache = new ConcurrentSkipListMap<>(); private final DataLocker _locker = new DataLocker(); @Inject SerializingObjectPersistentStore delegate; @@ -57,17 +58,20 @@ public class CachingObjectPersistentStore { return delegate.findAllObjects(); } - private void put(JObjectKey key, Optional> obj) { + private void put(JObjectKey key, Optional obj) { synchronized (_cache) { int size = obj.map(o -> o.data().estimateSize()).orElse(0); _curSize += size; - var old = _cache.putLast(key, new CacheEntry(obj, size)); + var entry = new CacheEntry(obj, size); + var old = _cache.putLast(key, entry); + _sortedCache.put(key, entry); if (old != null) _curSize -= old.size(); while (_curSize >= sizeLimit) { var del = _cache.pollFirstEntry(); + _sortedCache.remove(del.getKey(), del.getValue()); _curSize -= del.getValue().size(); _evict++; } @@ -75,7 +79,7 @@ } @Nonnull - public Optional> readObject(JObjectKey name) { + public Optional readObject(JObjectKey name) { try (var lock = _locker.lock(name)) { synchronized (_cache) { var got = _cache.get(name); @@ -90,7 +94,7 @@ } } - public void commitTx(TxManifestObj> names) { + public void commitTx(TxManifestObj names) { // During commit, readObject shouldn't be called for these items, // it should be handled by the upstream store synchronized (_cache) { @@ -98,11 +102,27 @@ names.deleted().stream()).toList()) { _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); _cache.remove(key); + _sortedCache.remove(key); } } delegate.commitTx(names); } - private record CacheEntry(Optional> object, long size) { + // Returns an iterator with a view of all committed objects + // Does not have to guarantee a consistent view; snapshots are handled by upper layers + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new MergingKvIterator<>( + new PredicateKvIterator<>( + new NavigableMapKvIterator<>(_sortedCache, start, key), + e -> e.object().orElse(null) + ), + delegate.getIterator(start, key)); + } + + public CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + + private record CacheEntry(Optional object, long size) { } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java deleted file mode 100644 index b668534c..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/FileObjectPersistentStore.java +++ /dev/null @@ -1,308 +0,0 @@ -package com.usatiuk.dhfs.objects.persistence; - -import com.google.protobuf.ByteString; -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; -import com.usatiuk.dhfs.utils.ByteUtils; -import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; -import io.quarkus.arc.properties.IfBuildProperty; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import net.openhft.hashing.LongHashFunction; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import javax.annotation.Nonnull; -import java.io.*; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.*; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.stream.Stream; - -import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -// File format: -// 64-bit metadata serialized size -// 64-bit offset of "rest of" metadata (if 
-1 then file has no data, -// if 0 then file has data and metadata fits into META_BLOCK_SIZE) -// Until META_BLOCK_SIZE - metadata (encoded as ObjectMetadataP) -// data (encoded as JObjectDataP) -// rest of metadata - -@ApplicationScoped -@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "files") -public class FileObjectPersistentStore implements ObjectPersistentStore { - private final Path _root; - private final Path _txManifest; - private ExecutorService _flushExecutor; - private RandomAccessFile _txFile; - private boolean _ready = false; - - public FileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { - this._root = Path.of(root).resolve("objects"); - _txManifest = Path.of(root).resolve("cur-tx-manifest"); - } - - void init(@Observes @Priority(100) StartupEvent event) throws IOException { - if (!_root.toFile().exists()) { - Log.info("Initializing with root " + _root); - _root.toFile().mkdirs(); - for (int i = 0; i < 256; i++) { - _root.resolve(String.valueOf(i)).toFile().mkdirs(); - } - } - if (!Files.exists(_txManifest)) { - Files.createFile(_txManifest); - } - _txFile = new RandomAccessFile(_txManifest.toFile(), "rw"); - _flushExecutor = Executors.newVirtualThreadPerTaskExecutor(); - - tryReplay(); - Log.info("Transaction replay done"); - _ready = true; - } - - void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException { - _ready = false; - Log.debug("Deleting manifest file"); - _txFile.close(); - Files.delete(_txManifest); - Log.debug("Manifest file deleted"); - } - - private void verifyReady() { - if (!_ready) throw new IllegalStateException("Wrong service order!"); - } - - private void tryReplay() { - var read = readTxManifest(); - if (read != null) - commitTxImpl(read, false); - } - - private Path getObjPath(@Nonnull JObjectKey obj) { - int h = Objects.hash(obj); - int p1 = h & 0b00000000_00000000_11111111_00000000; - return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj.toString()); - } - - private Path getTmpObjPath(@Nonnull JObjectKey obj) { - int h = Objects.hash(obj); - int p1 = h & 0b00000000_00000000_11111111_00000000; - return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj + ".tmp"); - } - - private void findAllObjectsImpl(Collection out, Path path) { - var read = path.toFile().listFiles(); - if (read == null) return; - - for (var s : read) { - if (s.isDirectory()) { - findAllObjectsImpl(out, s.toPath()); - } else { - if (s.getName().endsWith(".tmp")) continue; // FIXME: - out.add(new JObjectKey(s.getName())); // FIXME: - } - } - } - - @Nonnull - @Override - public Collection findAllObjects() { - verifyReady(); - ArrayList out = new ArrayList<>(); - findAllObjectsImpl(out, _root); - return Collections.unmodifiableCollection(out); - } - - @Nonnull - @Override - public Optional readObject(JObjectKey name) { - verifyReady(); - var path = getObjPath(name); - try (var rf = new RandomAccessFile(path.toFile(), "r")) { - ByteBuffer buf = UninitializedByteBuffer.allocateUninitialized(Math.toIntExact(rf.getChannel().size())); - fillBuffer(buf, rf.getChannel()); - buf.flip(); - - var bs = UnsafeByteOperations.unsafeWrap(buf); - // This way, the input will be considered "immutable" which would allow avoiding copies - // when parsing byte arrays -// var ch = bs.newCodedInput(); -// ch.enableAliasing(true); - return Optional.of(bs); - } catch (EOFException | FileNotFoundException | NoSuchFileException fx) { - return Optional.empty(); - } catch (IOException e) { - Log.error("Error reading file " 
+ path, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - private void fillBuffer(ByteBuffer dst, FileChannel src) throws IOException { - int rem = dst.remaining(); - int readTotal = 0; - int readCur = 0; - while (readTotal < rem && (readCur = src.read(dst)) != -1) { - readTotal += readCur; - } - if (rem != readTotal) - throw new EOFException(); - } - - private void writeObjectImpl(Path path, ByteString data, boolean sync) throws IOException { - try (var fsb = new FileOutputStream(path.toFile(), false)) { - data.writeTo(fsb); - - if (sync) { - fsb.flush(); - fsb.getFD().sync(); - } - } - } - - private TxManifestRaw readTxManifest() { - try { - var channel = _txFile.getChannel(); - - if (channel.size() == 0) - return null; - - channel.position(0); - - var buf = ByteBuffer.allocate(Math.toIntExact(channel.size())); - - fillBuffer(buf, channel); - buf.flip(); - - long checksum = buf.getLong(); - var data = buf.slice(); - var hash = LongHashFunction.xx3().hashBytes(data); - - if (hash != checksum) - throw new StatusRuntimeExceptionNoStacktrace(Status.DATA_LOSS.withDescription("Transaction manifest checksum mismatch!")); - - return SerializationHelper.deserialize(data.array(), data.arrayOffset()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private void putTxManifest(TxManifestRaw manifest) { - try { - var channel = _txFile.getChannel(); - var data = SerializationHelper.serializeArray(manifest); - channel.truncate(data.length + 8); - channel.position(0); - var hash = LongHashFunction.xx3().hashBytes(data); - if (channel.write(ByteUtils.longToBb(hash)) != 8) - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - if (channel.write(ByteBuffer.wrap(data)) != data.length) - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - channel.force(true); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public void commitTx(TxManifestRaw manifest) { - verifyReady(); - try { - _flushExecutor.invokeAll( - manifest.written().stream().map(p -> (Callable) () -> { - var tmpPath = getTmpObjPath(p.getKey()); - writeObjectImpl(tmpPath, p.getValue(), true); - return null; - }).toList() - ).forEach(p -> { - try { - p.get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - }); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - commitTxImpl(manifest, true); - } - - public void commitTxImpl(TxManifestRaw manifest, boolean failIfNotFound) { - if (manifest.deleted().isEmpty() && manifest.written().isEmpty()) { - Log.debug("Empty manifest, skipping"); - return; - } - - putTxManifest(manifest); - - try { - _flushExecutor.invokeAll( - Stream.concat(manifest.written().stream().map(p -> (Callable) () -> { - try { - Files.move(getTmpObjPath(p.getKey()), getObjPath(p.getKey()), ATOMIC_MOVE, REPLACE_EXISTING); - } catch (NoSuchFileException n) { - if (failIfNotFound) - throw n; - } - return null; - }), - manifest.deleted().stream().map(p -> (Callable) () -> { - deleteImpl(getObjPath(p)); - return null; - })).toList() - ).forEach(p -> { - try { - p.get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } - }); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - private void deleteImpl(Path path) { - try { - Files.delete(path); - } catch (NoSuchFileException ignored) { - } catch (IOException e) { - Log.error("Error deleting file " + path, e); - throw new 
StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Override - public long getTotalSpace() { - verifyReady(); - return _root.toFile().getTotalSpace(); - } - - @Override - public long getFreeSpace() { - verifyReady(); - return _root.toFile().getFreeSpace(); - } - - @Override - public long getUsableSpace() { - verifyReady(); - return _root.toFile().getUsableSpace(); - } - -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java new file mode 100644 index 00000000..6dd270c9 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java @@ -0,0 +1,8 @@ +package com.usatiuk.dhfs.objects.persistence; + +public enum IteratorStart { + LT, + LE, + GT, + GE +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java new file mode 100644 index 00000000..05e77dfa --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -0,0 +1,201 @@ +package com.usatiuk.dhfs.objects.persistence; + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; +import io.quarkus.arc.properties.IfBuildProperty; +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.lmdbjava.*; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.Collection; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Optional; + +import static org.lmdbjava.DbiFlags.MDB_CREATE; +import static org.lmdbjava.Env.create; + +@ApplicationScoped +@IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "lmdb") +public class LmdbObjectPersistentStore implements ObjectPersistentStore { + private final Path _root; + private Env _env; + private Dbi _db; + private boolean _ready = false; + + private static final String DB_NAME = "objects"; + + public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { + _root = Path.of(root).resolve("objects"); + } + + void init(@Observes @Priority(100) StartupEvent event) throws IOException { + if (!_root.toFile().exists()) { + Log.info("Initializing with root " + _root); + _root.toFile().mkdirs(); + } + _env = create(ByteArrayProxy.PROXY_BA) + .setMapSize(1_000_000_000_000L) + .setMaxDbs(1) + .open(_root.toFile(), EnvFlags.MDB_NOTLS); + _db = _env.openDbi(DB_NAME, MDB_CREATE); + _ready = true; + } + + void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException { + _ready = false; + _db.close(); + _env.close(); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Wrong service order!"); + } + + @Nonnull + @Override + public Collection findAllObjects() { +// try (Txn txn = env.txnRead()) { +// try (var cursor = db.openCursor(txn)) { +// var keys = List.of(); +// while 
(cursor.next()) { +// keys.add(JObjectKey.fromBytes(cursor.key())); +// } +// return keys; +// } +// } + return List.of(); + } + + @Nonnull + @Override + public Optional readObject(JObjectKey name) { + verifyReady(); + try (Txn txn = _env.txnRead()) { + var key = name.toString().getBytes(StandardCharsets.UTF_8); + var value = _db.get(txn, key); + return Optional.ofNullable(value).map(ByteString::copyFrom); + } + } + + private class LmdbKvIterator implements CloseableKvIterator { + private final Txn _txn = _env.txnRead(); + private final Cursor _cursor = _db.openCursor(_txn); + private boolean _hasNext = false; + + LmdbKvIterator(IteratorStart start, JObjectKey key) { + verifyReady(); + if (!_cursor.get(key.toString().getBytes(StandardCharsets.UTF_8), GetOp.MDB_SET_RANGE)) { + return; + } + + var got = JObjectKey.fromBytes(_cursor.key()); + var cmp = got.compareTo(key); + + assert cmp >= 0; + + _hasNext = true; + + if (cmp == 0) { + switch (start) { + case LT -> { + _hasNext = _cursor.prev(); + } + case GT -> { + _hasNext = _cursor.next(); + } + case LE, GE -> { + } + } + } else { + switch (start) { + case LT, LE -> { + _hasNext = _cursor.prev(); + } + case GT, GE -> { + } + } + } + } + + @Override + public void close() { + _cursor.close(); + _txn.close(); + } + + @Override + public boolean hasNext() { + return _hasNext; + } + + @Override + public Pair next() { + if (!_hasNext) { + throw new NoSuchElementException("No more elements"); + } + var ret = Pair.of(JObjectKey.fromBytes(_cursor.key()), ByteString.copyFrom(_cursor.val())); + _hasNext = _cursor.next(); + return ret; + } + + @Override + public JObjectKey peekNextKey() { + if (!_hasNext) { + throw new NoSuchElementException("No more elements"); + } + return JObjectKey.fromBytes(_cursor.key()); + } + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new LmdbKvIterator(start, key); + } + + @Override + public void commitTx(TxManifestRaw names) { + verifyReady(); + try (Txn txn = _env.txnWrite()) { + for (var written : names.written()) { + var key = written.getKey().toString().getBytes(StandardCharsets.UTF_8); + _db.put(txn, key, written.getValue().toByteArray()); + } + for (JObjectKey key : names.deleted()) { + var keyBytes = key.toString().getBytes(StandardCharsets.UTF_8); + _db.delete(txn, keyBytes); + } + txn.commit(); + } + } + + @Override + public long getTotalSpace() { + verifyReady(); + return _root.toFile().getTotalSpace(); + } + + @Override + public long getFreeSpace() { + verifyReady(); + return _root.toFile().getFreeSpace(); + } + + @Override + public long getUsableSpace() { + verifyReady(); + return _root.toFile().getUsableSpace(); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index cc7bd59e..7bba672a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -1,20 +1,21 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.NavigableMapKvIterator; import io.quarkus.arc.properties.IfBuildProperty; import jakarta.enterprise.context.ApplicationScoped; import 
javax.annotation.Nonnull; import java.util.Collection; -import java.util.HashMap; -import java.util.Map; import java.util.Optional; +import java.util.concurrent.ConcurrentSkipListMap; @ApplicationScoped @IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory") public class MemoryObjectPersistentStore implements ObjectPersistentStore { - private final Map _objects = new HashMap<>(); + private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); @Nonnull @Override @@ -32,6 +33,11 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore { } } + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new NavigableMapKvIterator<>(_objects, start, key); + } + @Override public void commitTx(TxManifestRaw names) { synchronized (this) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index 19fe5d42..3467007b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JObjectKey; import javax.annotation.Nonnull; @@ -16,6 +17,14 @@ public interface ObjectPersistentStore { @Nonnull Optional readObject(JObjectKey name); + // Returns an iterator with a view of all committed objects + // Does not have to guarantee a consistent view; snapshots are handled by upper layers + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + + default CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + void commitTx(TxManifestRaw names); long getTotalSpace(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index 99abf09c..6c339d03 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -1,5 +1,7 @@ package com.usatiuk.dhfs.objects.persistence; +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.ObjectSerializer; @@ -17,20 +19,59 @@ public class SerializingObjectPersistentStore { ObjectSerializer serializer; @Inject - ObjectPersistentStore delegate; + ObjectPersistentStore delegateStore; @Nonnull Collection findAllObjects() { - return delegate.findAllObjects(); + return delegateStore.findAllObjects(); } @Nonnull - Optional> readObject(JObjectKey name) { - return delegate.readObject(name).map(serializer::deserialize); + Optional readObject(JObjectKey name) { + return delegateStore.readObject(name).map(serializer::deserialize); } - void commitTx(TxManifestObj> names) { - delegate.commitTx(new TxManifestRaw( + private class SerializingKvIterator implements CloseableKvIterator { + private final CloseableKvIterator _delegate; + + private SerializingKvIterator(IteratorStart start, JObjectKey key) { + _delegate = delegateStore.getIterator(start, key); + } + + @Override + public JObjectKey peekNextKey() { + return _delegate.peekNextKey(); + } + + @Override + public void close() { + _delegate.close(); + } + + @Override + public boolean hasNext() { + return _delegate.hasNext(); + } + + @Override + public Pair next() { + var next = _delegate.next(); + return Pair.of(next.getKey(), serializer.deserialize(next.getValue())); + } + } + + // Returns an iterator with a view of all committed objects + // Does not have to guarantee a consistent view; snapshots are handled by upper layers + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new SerializingKvIterator(start, key); + } + + public CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + + void commitTx(TxManifestObj names) { + delegateStore.commitTx(new TxManifestRaw( names.written().stream() .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue()))) .toList() diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 629d8a67..cb5a4ccb 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -75,8 +75,8 @@ public class TransactionFactoryImpl implements TransactionFactory { } return switch (strategy) { - case OPTIMISTIC -> _source.get(type, key).data().map(JDataVersionedWrapper::data); - case WRITE -> _source.getWriteLocked(type, key).data().map(JDataVersionedWrapper::data); + case OPTIMISTIC -> (Optional) _source.get(type, key).data().map(JDataVersionedWrapper::data); + case WRITE -> (Optional) _source.getWriteLocked(type, key).data().map(JDataVersionedWrapper::data); }; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java index 5404245a..05826900 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObject.java @@ -6,5 +6,5 @@ import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import java.util.Optional; public interface TransactionObject { - Optional> data(); + Optional data(); } diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties index f7842d0c..93211847 100644 --- a/dhfs-parent/objects/src/main/resources/application.properties +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -1,4 +1,4 @@ -dhfs.objects.persistence=files +dhfs.objects.persistence=lmdb dhfs.objects.writeback.limit=134217728 dhfs.objects.lru.limit=134217728 dhfs.objects.lru.print-stats=true From c60a55b9152d1c06fc13455f375fb58b832312f5 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 17:22:58 +0100 Subject: [PATCH 075/105] somewhat working transactions 3.0 --- dhfs-parent/objects/pom.xml | 4 + .../dhfs/objects/CurrentTransaction.java | 14 +- .../dhfs/objects/JDataVersionedWrapper.java | 3 - .../usatiuk/dhfs/objects/JObjectManager.java | 273 ++++++------------ 
.../com/usatiuk/dhfs/objects/LockManager.java | 14 + .../dhfs/objects/MergingKvIterator.java | 57 ++-- .../usatiuk/dhfs/objects/SnapshotManager.java | 248 ++++++++++++++++ .../objects/TombstoneMergingKvIterator.java | 54 ++++ .../dhfs/objects/TransactionManagerImpl.java | 2 + .../dhfs/objects/TransactionObjectLocked.java | 11 + .../dhfs/objects/TransactionObjectNoLock.java | 10 + .../com/usatiuk/dhfs/objects/TxWriteback.java | 4 +- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 11 +- .../WritebackObjectPersistentStore.java | 25 +- .../CachingObjectPersistentStore.java | 2 +- .../objects/persistence/IteratorStart.java | 2 +- .../LmdbObjectPersistentStore.java | 16 + .../transaction/ReadTrackingObjectSource.java | 58 ---- .../ReadTrackingObjectSourceFactory.java | 111 +++++++ .../ReadTrackingTransactionObjectSource.java | 25 ++ .../dhfs/objects/transaction/Transaction.java | 12 +- .../transaction/TransactionFactory.java | 2 +- .../transaction/TransactionFactoryImpl.java | 80 +++-- .../transaction/TransactionHandle.java | 2 - .../transaction/TransactionObjectSource.java | 10 - .../transaction/TransactionPrivate.java | 8 +- .../dhfs/objects/MergingKvIteratorTest.java | 105 +++++++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 11 +- .../dhfs/objects/RemoteTransaction.java | 4 - .../com/usatiuk/dhfs/utils/DataLocker.java | 2 + 30 files changed, 826 insertions(+), 354 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index 8f4c33ff..d5dc8f9b 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -69,6 +69,10 @@ lmdbjava 0.9.1 + + org.apache.commons + commons-collections4 + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index 71ac9fa4..d3ef24ca 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -1,13 +1,15 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; 
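// Aside (illustrative sketch, not part of the patch): the Transaction.getIterator added
// in this commit gives transaction code a sorted key-range scan. Assuming the generics
// stripped from this dump are Iterator<Pair<JObjectKey, JData>>, and with a hypothetical
// caller-supplied inRange predicate, a range scan could look like this:
static void scanFrom(Transaction tx, JObjectKey from, java.util.function.Predicate<JObjectKey> inRange) {
    var it = tx.getIterator(IteratorStart.GE, from); // position at the first key >= from
    while (it.hasNext()) {
        var next = it.next();
        if (!inRange.test(next.getKey()))
            break; // keys arrive in ascending order, so the scan can stop early
        // ... use next.getValue() here ...
    }
}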
import java.util.Collection; -import java.util.List; +import java.util.Iterator; import java.util.Optional; @ApplicationScoped @@ -15,11 +17,6 @@ public class CurrentTransaction implements Transaction { @Inject TransactionManager transactionManager; - @Override - public long getId() { - return transactionManager.current().getId(); - } - @Override public void onCommit(Runnable runnable) { transactionManager.current().onCommit(runnable); @@ -46,6 +43,11 @@ public class CurrentTransaction implements Transaction { return transactionManager.current().findAllObjects(); } + @Override + public Iterator> getIterator(IteratorStart start, JObjectKey key) { + return transactionManager.current().getIterator(start, key); + } + @Override public void put(JData obj) { transactionManager.current().put(obj); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java index facba141..d1aaddc2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataVersionedWrapper.java @@ -5,7 +5,4 @@ import jakarta.annotation.Nonnull; import java.io.Serializable; public record JDataVersionedWrapper(@Nonnull JData data, long version) implements Serializable { - public JDataVersionedWrapper withVersion(long version) { - return new JDataVersionedWrapper(data, version); - } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 0ff1d007..5c0aa18b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -2,7 +2,6 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; -import com.usatiuk.dhfs.utils.DataLocker; import io.quarkus.logging.Log; import io.quarkus.runtime.StartupEvent; import jakarta.annotation.Priority; @@ -11,10 +10,7 @@ import jakarta.enterprise.event.Observes; import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; -import java.lang.ref.Cleaner; -import java.lang.ref.WeakReference; import java.util.*; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.function.Function; @@ -27,21 +23,21 @@ import java.util.stream.Stream; @ApplicationScoped public class JObjectManager { private final List _preCommitTxHooks; - private final DataLocker _objLocker = new DataLocker(); - private final ConcurrentHashMap> _objects = new ConcurrentHashMap<>(); private final AtomicLong _txCounter = new AtomicLong(); private boolean _ready = false; @Inject - WritebackObjectPersistentStore writebackObjectPersistentStore; + SnapshotManager snapshotManager; @Inject TransactionFactory transactionFactory; + @Inject + LockManager lockManager; private void verifyReady() { if (!_ready) throw new IllegalStateException("Wrong service order!"); } void init(@Observes @Priority(200) StartupEvent event) { - var read = writebackObjectPersistentStore.readObject(JDataDummy.TX_ID_OBJ_NAME).orElse(null); + var read = snapshotManager.readObjectDirect(JDataDummy.TX_ID_OBJ_NAME).orElse(null); if (read != null) { _txCounter.set(read.version()); } @@ -52,80 +48,24 @@ public class JObjectManager { 
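// Aside (illustrative sketch, not part of the patch): after this rewrite a transaction no
// longer receives a fresh id up front; createTransaction() below only pins the current
// counter value as its snapshot id, and a real commit id is allocated later, inside
// commit(). A toy model of that split, with hypothetical names:
final class TxIdSketch {
    private final java.util.concurrent.atomic.AtomicLong _counter = new java.util.concurrent.atomic.AtomicLong();

    long beginSnapshot() { // cheap: concurrent transactions may share one snapshot id
        return _counter.get();
    }

    long allocateCommitId() { // one per committing transaction
        return _counter.incrementAndGet();
    }
}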
_preCommitTxHooks = preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList(); } - private JDataVersionedWrapper get(Class type, JObjectKey key) { - verifyReady(); - while (true) { - { - var got = _objects.get(key); - - if (got != null) { - var ref = got.get(); - if (ref == null) { - _objects.remove(key, got); - } else if (type.isInstance(ref.data())) { - return (JDataVersionedWrapper) ref; - } else { - throw new IllegalArgumentException("Object type mismatch: " + ref.data().getClass() + " vs " + type); - } - } - } - //noinspection unused - try (var readLock = _objLocker.lock(key)) { - if (_objects.containsKey(key)) continue; - var read = writebackObjectPersistentStore.readObject(key).orElse(null); - - if (read == null) return null; - - if (type.isInstance(read.data())) { - var wrapper = new JDataWrapper<>((JDataVersionedWrapper) read); - var old = _objects.put(key, wrapper); - assert old == null; - return (JDataVersionedWrapper) read; - } else { - throw new IllegalArgumentException("Object type mismatch: " + read.getClass() + " vs " + type); - } - } - } - } - - private TransactionObjectNoLock getObj(Class type, JObjectKey key) { - verifyReady(); - var got = get(type, key); - return new TransactionObjectNoLock<>(Optional.ofNullable(got)); - } - - private TransactionObjectLocked getObjLock(Class type, JObjectKey key) { - verifyReady(); - var lock = _objLocker.lock(key); - var got = get(type, key); - return new TransactionObjectLocked<>(Optional.ofNullable(got), lock); - } - public TransactionPrivate createTransaction() { verifyReady(); - var counter = _txCounter.getAndIncrement(); - Log.trace("Creating transaction " + counter); - return transactionFactory.createTransaction(counter, new TransactionObjectSourceImpl(counter)); + return transactionFactory.createTransaction(_txCounter.get()); } public TransactionHandle commit(TransactionPrivate tx) { verifyReady(); - Log.trace("Committing transaction " + tx.getId()); - // FIXME: Better way? 
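// Aside (illustrative sketch, not part of the patch): the do/while loop further down in
// this hunk re-runs every PreCommitTxHook until nothing changes, because a hook's
// callbacks may themselves produce new writes that the other hooks must then observe.
// The fixpoint shape of that loop, with simplified types:
static void runHooksToFixpoint(java.util.List<java.util.function.Supplier<Boolean>> hooks) {
    boolean somethingChanged;
    do {
        somethingChanged = false;
        for (var hook : hooks)
            somethingChanged |= hook.get(); // true if this hook produced new writes
    } while (somethingChanged);
}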
- tx.put(JDataDummy.getInstance()); - - var current = new LinkedHashMap>(); - var dependenciesLocked = new LinkedHashMap>(); - Map> reads; + var writes = new LinkedHashMap>(); + var dependenciesLocked = new LinkedHashMap>(); + Map> readSet; var toUnlock = new ArrayList(); Consumer addDependency = key -> { dependenciesLocked.computeIfAbsent(key, k -> { - var got = getObjLock(JData.class, k); - Log.trace("Adding dependency " + k.toString() + " -> " + got.data().map(JDataVersionedWrapper::data).map(JData::key).orElse(null)); - toUnlock.add(got.lock); - return got; + var lock = lockManager.lockObject(k); + toUnlock.add(lock); + return snapshotManager.readObjectDirect(k); }); }; @@ -135,13 +75,12 @@ public class JObjectManager { try { try { Function getCurrent = - key -> switch (current.get(key)) { + key -> switch (writes.get(key)) { case TxRecord.TxObjectRecordWrite write -> write.data(); case TxRecord.TxObjectRecordDeleted deleted -> null; - case null -> - tx.readSource().get(JData.class, key).data().map(JDataVersionedWrapper::data).orElse(null); + case null -> tx.readSource().get(JData.class, key).orElse(null); default -> { - throw new TxCommitException("Unexpected value: " + current.get(key)); + throw new TxCommitException("Unexpected value: " + writes.get(key)); } }; @@ -177,71 +116,82 @@ public class JObjectManager { } } } - current.putAll(currentIteration); + writes.putAll(currentIteration); } while (somethingChanged); - } finally { - reads = tx.reads(); - Stream.concat(reads.keySet().stream(), current.keySet().stream()) + if (writes.isEmpty()) { + Log.trace("Committing transaction - no changes"); + return new TransactionHandle() { + @Override + public void onFlush(Runnable runnable) { + runnable.run(); + } + }; + } + + Log.trace("Committing transaction start"); + // FIXME: Better way? 
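// Aside (illustrative sketch, not part of the patch): the two added lines just below
// persist the transaction counter by writing JDataDummy on every non-empty commit, so
// the dummy's stored version is always the last committed transaction id. That is what
// init() reads back through readObjectDirect(JDataDummy.TX_ID_OBJ_NAME) to restore
// _txCounter after a restart; roughly:
static long recoverTxCounter(SnapshotManager snapshotManager) {
    return snapshotManager.readObjectDirect(JDataDummy.TX_ID_OBJ_NAME)
            .map(JDataVersionedWrapper::version)
            .orElse(0L); // assumption: 0 as the starting id on a fresh store
}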
+ addDependency.accept(JDataDummy.TX_ID_OBJ_NAME); + tx.put(JDataDummy.getInstance()); + } finally { + readSet = tx.reads(); + + Stream.concat(readSet.keySet().stream(), writes.keySet().stream()) .sorted(Comparator.comparing(JObjectKey::toString)) .forEach(addDependency); - for (var read : reads.entrySet()) { + for (var read : readSet.entrySet()) { if (read.getValue() instanceof TransactionObjectLocked locked) { - toUnlock.add(locked.lock); + toUnlock.add(locked.lock()); } } } - for (var read : reads.entrySet()) { + var snapshotId = tx.snapshot().id(); + var newId = _txCounter.get() + 1; + + for (var read : readSet.entrySet()) { var dep = dependenciesLocked.get(read.getKey()); - if (dep.data().isEmpty()) { + if (dep.isEmpty() != read.getValue().data().isEmpty()) { Log.trace("Checking read dependency " + read.getKey() + " - not found"); + throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty()); + } + + if (dep.isEmpty()) { + // TODO: Every write gets a dependency due to hooks continue; +// assert false; +// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty()); } - if (dep.data().orElse(null) != read.getValue().data().orElse(null)) { - Log.trace("Checking dependency " + read.getKey() + " - changed already"); - throw new TxCommitException("Serialization hazard: " + dep.data().get().version() + " vs " + tx.getId()); - } - - if (dep.data().get().version() >= tx.getId()) { - assert false; + if (dep.get().version() > snapshotId) { Log.trace("Checking dependency " + read.getKey() + " - newer than"); - throw new TxCommitException("Serialization hazard: " + dep.data().get().version() + " vs " + tx.getId()); + throw new TxCommitException("Serialization hazard: " + dep.get().version() + " vs " + snapshotId); } Log.trace("Checking dependency " + read.getKey() + " - ok with read"); } - Log.tracef("Flushing transaction %d to storage", tx.getId()); + Log.tracef("Flushing transaction %d to storage", newId); - for (var action : current.entrySet()) { - var dep = dependenciesLocked.get(action.getKey()); - if (dep.data().isPresent() && dep.data.get().version() >= tx.getId()) { - Log.trace("Skipping write " + action.getKey() + " - dependency " + dep.data().get().version() + " vs " + tx.getId()); - continue; - } + var realNewId = _txCounter.getAndIncrement() + 1; + assert realNewId == newId; - switch (action.getValue()) { - case TxRecord.TxObjectRecordWrite write -> { - Log.trace("Writing " + action.getKey()); - var wrapped = new JDataVersionedWrapper(write.data(), tx.getId()); - _objects.put(action.getKey(), new JDataWrapper<>(wrapped)); - } - case TxRecord.TxObjectRecordDeleted deleted -> { - Log.trace("Deleting " + action.getKey()); - _objects.remove(action.getKey()); - } - default -> { - throw new TxCommitException("Unexpected value: " + action.getValue()); - } - } - } - - Log.tracef("Committing transaction %d to storage", tx.getId()); - var addFlushCallback = writebackObjectPersistentStore.commitTx(current.values(), tx.getId()); + Log.tracef("Committing transaction %d to storage", newId); + var addFlushCallback = snapshotManager.commitTx( + writes.values().stream() + .filter(r -> { + if (r instanceof TxRecord.TxObjectRecordWrite(JData data)) { + var dep = dependenciesLocked.get(data.key()); + if (dep.isPresent() && dep.get().version() > snapshotId) { + Log.trace("Skipping write " + data.key() + " - dependency " + dep.get().version() + " vs " + snapshotId); + return false; + } + } + return 
true; + }).toList(), + newId); for (var callback : tx.getOnCommit()) { callback.run(); @@ -252,11 +202,6 @@ public class JObjectManager { } return new TransactionHandle() { - @Override - public long getId() { - return tx.getId(); - } - @Override public void onFlush(Runnable runnable) { addFlushCallback.accept(runnable); @@ -269,72 +214,44 @@ public class JObjectManager { for (var unlock : toUnlock) { unlock.close(); } + tx.close(); } } public void rollback(TransactionPrivate tx) { verifyReady(); - Log.trace("Rolling back transaction " + tx.getId()); tx.reads().forEach((key, value) -> { if (value instanceof TransactionObjectLocked locked) { - locked.lock.close(); + locked.lock().close(); } }); + tx.close(); } - private record TransactionObjectNoLock - (Optional data) - implements TransactionObject { - } - - private record TransactionObjectLocked - (Optional data, AutoCloseableNoThrow lock) - implements TransactionObject { - } - - private class JDataWrapper extends WeakReference { - private static final Cleaner CLEANER = Cleaner.create(); - - public JDataWrapper(JDataVersionedWrapper referent) { - super(referent); - var key = referent.data().key(); - CLEANER.register(referent, () -> { - _objects.remove(key, this); - }); - } - - @Override - public String toString() { - return "JDataWrapper{" + - "ref=" + get() + - '}'; - } - } - - private class TransactionObjectSourceImpl implements TransactionObjectSource { - private final long _txId; - - private TransactionObjectSourceImpl(long txId) { - _txId = txId; - } - - @Override - public TransactionObject get(Class type, JObjectKey key) { - var got = getObj(type, key); - if (got.data().isPresent() && got.data().get().version() > _txId) { - throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); - } - return got; - } - - @Override - public TransactionObject getWriteLocked(Class type, JObjectKey key) { - var got = getObjLock(type, key); - if (got.data().isPresent() && got.data().get().version() > _txId) { - got.lock().close(); - throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); - } - return got; - } - } + // private class TransactionObjectSourceImpl implements TransactionObjectSource { +// private final long _txId; +// +// private TransactionObjectSourceImpl(long txId) { +// _txId = txId; +// } +// +// @Override +// public TransactionObject get(Class type, JObjectKey key) { +// var got = getObj(type, key); +// if (got.data().isPresent() && got.data().get().version() > _txId) { +// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); +// } +// return got; +// } +// +// @Override +// public TransactionObject getWriteLocked(Class type, JObjectKey key) { +// var got = getObjLock(type, key); +// if (got.data().isPresent() && got.data().get().version() > _txId) { +// got.lock().close(); +// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId); +// } +// return got; +// } +// } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java new file mode 100644 index 00000000..8d7ae3d1 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/LockManager.java @@ -0,0 +1,14 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; 
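// Aside (illustrative sketch, not part of the patch): LockManager below is a thin wrapper
// over DataLocker that hands out per-key locks as AutoCloseableNoThrow, so callers can
// scope a critical section with try-with-resources. Typical usage could look like:
static void withObjectLocked(LockManager lockManager, JObjectKey key, Runnable action) {
    try (var lock = lockManager.lockObject(key)) {
        action.run(); // everything here runs holding the per-key lock
    }                 // the lock is released here even if action throws
}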
+import com.usatiuk.dhfs.utils.DataLocker; +import jakarta.enterprise.context.ApplicationScoped; + +@ApplicationScoped +public class LockManager { + private final DataLocker _objLocker = new DataLocker(); + + public AutoCloseableNoThrow lockObject(JObjectKey key) { + return _objLocker.lock(key); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java index 93305583..8a679da8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -2,27 +2,22 @@ package com.usatiuk.dhfs.objects; import org.apache.commons.lang3.tuple.Pair; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.stream.Stream; +import java.util.*; public class MergingKvIterator, V> implements CloseableKvIterator { - private final List> _iterators; + private final Map, Integer> _iterators; private final SortedMap> _sortedIterators = new TreeMap<>(); public MergingKvIterator(List> iterators) { - _iterators = iterators; + int counter = 0; + var iteratorsTmp = new HashMap, Integer>(); + for (CloseableKvIterator iterator : iterators) { + iteratorsTmp.put(iterator, counter++); + } + _iterators = Collections.unmodifiableMap(iteratorsTmp); for (CloseableKvIterator iterator : iterators) { - if (!iterator.hasNext()) { - continue; - } - K key = iterator.peekNextKey(); - if (key != null) { - _sortedIterators.put(key, iterator); - } + advanceIterator(iterator); } } @@ -31,23 +26,36 @@ public class MergingKvIterator, V> implements CloseableK this(List.of(iterators)); } - @SafeVarargs - public MergingKvIterator(MergingKvIterator parent, CloseableKvIterator... 
iterators) { - this(Stream.concat(parent._iterators.stream(), Stream.of(iterators)).toList()); + private void advanceIterator(CloseableKvIterator iterator) { + if (!iterator.hasNext()) { + return; + } + + K key = iterator.peekNextKey(); + if (!_sortedIterators.containsKey(key)) { + _sortedIterators.put(key, iterator); + return; + } + + var oursPrio = _iterators.get(iterator); + var them = _sortedIterators.get(key); + var theirsPrio = _iterators.get(them); + if (oursPrio < theirsPrio) { + _sortedIterators.put(key, iterator); + advanceIterator(them); + } } @Override public K peekNextKey() { - var cur = _sortedIterators.pollFirstEntry(); - if (cur == null) { + if (_sortedIterators.isEmpty()) throw new NoSuchElementException(); - } - return cur.getKey(); + return _sortedIterators.firstKey(); } @Override public void close() { - for (CloseableKvIterator iterator : _iterators) { + for (CloseableKvIterator iterator : _iterators.keySet()) { iterator.close(); } } @@ -64,10 +72,7 @@ public class MergingKvIterator, V> implements CloseableK throw new NoSuchElementException(); } var curVal = cur.getValue().next(); - if (cur.getValue().hasNext()) { - var nextKey = cur.getValue().peekNextKey(); - _sortedIterators.put(nextKey, cur.getValue()); - } + advanceIterator(cur.getValue()); return curVal; } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java new file mode 100644 index 00000000..75653f01 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -0,0 +1,248 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.TxRecord; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import io.quarkus.logging.Log; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.collections4.MultiValuedMap; +import org.apache.commons.collections4.multimap.HashSetValuedHashMap; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.commons.lang3.tuple.Pair; + +import javax.annotation.Nonnull; +import java.lang.ref.Cleaner; +import java.util.*; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.function.Consumer; + +@ApplicationScoped +public class SnapshotManager { + @Inject + WritebackObjectPersistentStore delegateStore; + + private interface SnapshotEntry { + } + + private record SnapshotEntryObject(JDataVersionedWrapper data) implements SnapshotEntry { + } + + private record SnapshotEntryDeleted() implements SnapshotEntry { + } + + private record SnapshotKey(JObjectKey key, long version) implements Comparable { + @Override + public int compareTo(@Nonnull SnapshotKey o) { + return Comparator.comparing(SnapshotKey::key) + .thenComparing(SnapshotKey::version) + .compare(this, o); + } + } + + private long _lastSnapshotId = 0; + private long _lastAliveSnapshotId = -1; + + private final Queue _snapshotIds = new ArrayDeque<>(); + private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); + private final MultiValuedMap _snapshotBounds = new HashSetValuedHashMap<>(); + private final HashMap _snapshotRefCounts = new HashMap<>(); + + private void verify() { + assert _snapshotIds.isEmpty() == (_lastAliveSnapshotId == -1); + assert _snapshotIds.isEmpty() || _snapshotIds.peek() == _lastAliveSnapshotId; + } + + Consumer commitTx(Collection> writes, long id) { + 
synchronized (this) { + if (!_snapshotIds.isEmpty()) { + verify(); + for (var action : writes) { + var current = delegateStore.readObjectVerbose(action.key()); + Pair newSnapshotEntry = switch (current) { + case WritebackObjectPersistentStore.VerboseReadResultPersisted( + Optional data + ) -> Pair.of(new SnapshotKey(action.key(), _snapshotIds.peek()), + data.map(SnapshotEntryObject::new).orElse(new SnapshotEntryDeleted())); + case WritebackObjectPersistentStore.VerboseReadResultPending( + TxWriteback.PendingWriteEntry pending + ) -> switch (pending) { + case TxWriteback.PendingWrite write -> + Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data())); + case TxWriteback.PendingDelete delete -> + Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted()); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + default -> throw new IllegalStateException("Unexpected value: " + current); + }; + + _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); + _snapshotBounds.put(newSnapshotEntry.getLeft().version(), newSnapshotEntry.getLeft()); + } + } + + verify(); + return delegateStore.commitTx(writes, id); + } + } + + private void unrefSnapshot(long id) { + synchronized (this) { + verify(); + var refCount = _snapshotRefCounts.merge(id, -1L, (a, b) -> a + b == 0 ? null : a + b); + if (!(refCount == null && id == _lastAliveSnapshotId)) { + return; + } + + long curCount; + long curId = id; + do { + _snapshotIds.poll(); + + for (var key : _snapshotBounds.remove(curId)) { + _objects.remove(key); + } + + if (_snapshotIds.isEmpty()) { + _lastAliveSnapshotId = -1; + break; + } + + curId = _snapshotIds.peek(); + _lastAliveSnapshotId = curId; + + curCount = _snapshotRefCounts.getOrDefault(curId, 0L); + } while (curCount == 0); + verify(); + } + } + + public class Snapshot implements AutoCloseableNoThrow { + private final long _id; + private static final Cleaner CLEANER = Cleaner.create(); + private final MutableObject _closed = new MutableObject<>(false); + + public long id() { + return _id; + } + + private Snapshot(long id) { + _id = id; + synchronized (SnapshotManager.this) { + verify(); + if (_lastSnapshotId > id) + throw new IllegalArgumentException("Snapshot id less than last? " + id + " vs " + _lastSnapshotId); + _lastSnapshotId = id; + if (_lastAliveSnapshotId == -1) + _lastAliveSnapshotId = id; + _snapshotIds.add(id); + _snapshotRefCounts.merge(id, 1L, Long::sum); + verify(); + } + var closedRef = _closed; + var idRef = _id; + CLEANER.register(this, () -> { + if (!closedRef.getValue()) { + Log.error("Snapshot " + idRef + " was not closed before GC"); + } + }); + } + + public class SnapshotKvIterator implements CloseableKvIterator> { + private final CloseableKvIterator _backing; + private Pair> _next; + + public SnapshotKvIterator(IteratorStart start, JObjectKey key) { + _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(key, 0L)); + fillNext(); + } + + private void fillNext() { + while (_backing.hasNext() && _next == null) { + var next = _backing.next(); + var nextNextKey = _backing.hasNext() ? 
_backing.peekNextKey() : null; + while (nextNextKey != null && nextNextKey.key.equals(next.getKey().key()) && nextNextKey.version() <= _id) { + next = _backing.next(); + nextNextKey = _backing.peekNextKey(); + } + if (next.getKey().version() <= _id) { + _next = switch (next.getValue()) { + case SnapshotEntryObject(JDataVersionedWrapper data) -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); + case SnapshotEntryDeleted() -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); + default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); + }; + } + } + } + + @Override + public JObjectKey peekNextKey() { + if (_next == null) + throw new NoSuchElementException(); + return _next.getKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair> next() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var ret = _next; + _next = null; + fillNext(); + return ret; + } + + } + + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + return new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), delegateStore.getIterator(start, key)); + } + + public CloseableKvIterator getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + + @Nonnull + public Optional readObject(JObjectKey name) { + try (var it = getIterator(name)) { + if (it.hasNext()) { + var read = it.next(); + if (read.getKey().equals(name)) { + return Optional.of(read.getValue()); + } + } + } + return Optional.empty(); + } + + @Override + public void close() { + if (_closed.getValue()) { + return; + } + _closed.setValue(true); + unrefSnapshot(_id); + } + } + + public Snapshot createSnapshot(long id) { + return new Snapshot(id); + } + + @Nonnull + Optional readObjectDirect(JObjectKey name) { + return delegateStore.readObject(name); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java new file mode 100644 index 00000000..90fbe455 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -0,0 +1,54 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +import java.util.List; + +public class TombstoneMergingKvIterator, V> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public TombstoneMergingKvIterator(List>> iterators) { + _backing = new PredicateKvIterator<>( + new MergingKvIterator<>(iterators), + pair -> { + if (pair instanceof Tombstone) { + return null; + } + return ((Data) pair).value; + }); + } + + @SafeVarargs + public TombstoneMergingKvIterator(CloseableKvIterator>... 
iterators) { + this(List.of(iterators)); + } + + public interface DataType { + } + + public record Tombstone() implements DataType { + } + + public record Data(V value) implements DataType { + } + + @Override + public K peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + return _backing.next(); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java index 2f3c212d..bf617e19 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManagerImpl.java @@ -37,6 +37,7 @@ public class TransactionManagerImpl implements TransactionManager { Log.trace("Transaction commit failed", e); throw e; } finally { + _currentTransaction.get().close(); _currentTransaction.remove(); } } @@ -53,6 +54,7 @@ public class TransactionManagerImpl implements TransactionManager { Log.error("Transaction rollback failed", e); throw e; } finally { + _currentTransaction.get().close(); _currentTransaction.remove(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java new file mode 100644 index 00000000..ac3a856c --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectLocked.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.TransactionObject; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; + +import java.util.Optional; + +public record TransactionObjectLocked + (Optional data, AutoCloseableNoThrow lock) + implements TransactionObject { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java new file mode 100644 index 00000000..7672d09a --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionObjectNoLock.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.transaction.TransactionObject; + +import java.util.Optional; + +public record TransactionObjectNoLock + (Optional data) + implements TransactionObject { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java index 8723d4ee..2c50bb46 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java @@ -32,9 +32,9 @@ public interface TxWriteback { record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { } - CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); + CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key); - default CloseableKvIterator getIterator(JObjectKey key) { + default CloseableKvIterator> getIterator(JObjectKey key) { return getIterator(IteratorStart.GE, key); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 4c6676c8..8d4a6077 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -379,14 +379,13 @@ public class TxWritebackImpl implements TxWriteback { // Returns an iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers @Override - public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { return new PredicateKvIterator<>( new NavigableMapKvIterator<>(_pendingWrites, start, key), - e -> { - if (e instanceof PendingWrite pw) { - return pw.data(); - } - return null; + e -> switch (e) { + case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); + case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + e); }); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 4fd115c0..da690eff 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -45,6 +45,24 @@ public class WritebackObjectPersistentStore { }; } + public interface VerboseReadResult { + } + + public record VerboseReadResultPersisted(Optional data) implements VerboseReadResult { + } + + public record VerboseReadResultPending(TxWriteback.PendingWriteEntry pending) implements VerboseReadResult { + } + + @Nonnull + VerboseReadResult readObjectVerbose(JObjectKey key) { + var pending = txWriteback.getPendingWrite(key).orElse(null); + if (pending != null) { + return new VerboseReadResultPending(pending); + } + return new VerboseReadResultPersisted(delegate.readObject(key)); + } + Consumer commitTx(Collection> writes, long id) { var bundle = txWriteback.createBundle(); try { @@ -78,11 +96,12 @@ public class WritebackObjectPersistentStore { // Returns an iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers - public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new MergingKvIterator<>(delegate.getIterator(start, key), txWriteback.getIterator(start, key)); + public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { + return new MergingKvIterator<>(txWriteback.getIterator(start, key), + new PredicateKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)); } - public CloseableKvIterator getIterator(JObjectKey key) { + public CloseableKvIterator> getIterator(JObjectKey key) { return getIterator(IteratorStart.GE, key); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 60afe37a..915bb034 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -116,7 +116,7 
@@ public class CachingObjectPersistentStore { new NavigableMapKvIterator<>(_sortedCache, start, key), e -> e.object().orElse(null) ), - delegate.getIterator(start, key)); + delegate.getIterator(start, key)); // TODO: Doesn't work } public CloseableKvIterator getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java index 6dd270c9..338c025f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/IteratorStart.java @@ -4,5 +4,5 @@ public enum IteratorStart { LT, LE, GT, - GE + GE, } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index 05e77dfa..9bb3b38a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -10,12 +10,14 @@ import io.quarkus.runtime.StartupEvent; import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.lmdbjava.*; import javax.annotation.Nonnull; import java.io.IOException; +import java.lang.ref.Cleaner; import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.Collection; @@ -94,7 +96,17 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { private final Cursor _cursor = _db.openCursor(_txn); private boolean _hasNext = false; + private static final Cleaner CLEANER = Cleaner.create(); + private final MutableObject _closed = new MutableObject<>(false); + LmdbKvIterator(IteratorStart start, JObjectKey key) { + var closedRef = _closed; + CLEANER.register(this, () -> { + if (!closedRef.getValue()) { + Log.error("Iterator was not closed before GC"); + } + }); + verifyReady(); if (!_cursor.get(key.toString().getBytes(StandardCharsets.UTF_8), GetOp.MDB_SET_RANGE)) { return; @@ -131,6 +143,10 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { @Override public void close() { + if (_closed.getValue()) { + return; + } + _closed.setValue(true); _cursor.close(); _txn.close(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java deleted file mode 100644 index 8035eac6..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSource.java +++ /dev/null @@ -1,58 +0,0 @@ -package com.usatiuk.dhfs.objects.transaction; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -public class ReadTrackingObjectSource implements TransactionObjectSource { - private final TransactionObjectSource _delegate; - - private final Map> _readSet = new HashMap<>(); - - public ReadTrackingObjectSource(TransactionObjectSource delegate) { - _delegate 
= delegate; - } - - public Map> getRead() { - return Collections.unmodifiableMap(_readSet); - } - - @Override - public TransactionObject get(Class type, JObjectKey key) { - var got = _readSet.get(key); - - if (got == null) { - var read = _delegate.get(type, key); - _readSet.put(key, read); - return read; - } - - got.data().ifPresent(data -> { - if (!type.isInstance(data.data())) - throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass()); - }); - - return (TransactionObject) got; - } - - @Override - public TransactionObject getWriteLocked(Class type, JObjectKey key) { - var got = _readSet.get(key); - - if (got == null) { - var read = _delegate.getWriteLocked(type, key); - _readSet.put(key, read); - return read; - } - - got.data().ifPresent(data -> { - if (!type.isInstance(data.data())) - throw new IllegalStateException("Type mismatch for " + got + ": expected " + type + ", got " + data.getClass()); - }); - - return (TransactionObject) got; - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java new file mode 100644 index 00000000..65985dc6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -0,0 +1,111 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.*; + +@ApplicationScoped +public class ReadTrackingObjectSourceFactory { + @Inject + LockManager lockManager; + + public ReadTrackingTransactionObjectSource create(SnapshotManager.Snapshot snapshot) { + return new ReadTrackingObjectSourceImpl(snapshot); + } + + public class ReadTrackingObjectSourceImpl implements ReadTrackingTransactionObjectSource { + private final SnapshotManager.Snapshot _snapshot; + + private final Map> _readSet = new HashMap<>(); + private final Queue _iterators = new ArrayDeque<>(); + + public ReadTrackingObjectSourceImpl(SnapshotManager.Snapshot snapshot) { + _snapshot = snapshot; + } + + public Map> getRead() { + return Collections.unmodifiableMap(_readSet); + } + + @Override + public Optional get(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var read = _snapshot.readObject(key); + _readSet.put(key, new TransactionObjectNoLock<>(read)); + return read.map(JDataVersionedWrapper::data).map(type::cast); + } + + return got.data().map(JDataVersionedWrapper::data).map(type::cast); + } + + @Override + public Optional getWriteLocked(Class type, JObjectKey key) { + var got = _readSet.get(key); + + if (got == null) { + var lock = lockManager.lockObject(key); + try { + var read = _snapshot.readObject(key); + _readSet.put(key, new TransactionObjectLocked<>(read, lock)); + return read.map(JDataVersionedWrapper::data).map(type::cast); + } catch (Exception e) { + lock.close(); + throw e; + } + } + + return got.data().map(JDataVersionedWrapper::data).map(type::cast); + } + + @Override + public void close() { + for (var it : _iterators) { + it.close(); + } + } + + private class ReadTrackingIterator implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public 
ReadTrackingIterator(IteratorStart start, JObjectKey key) { + _backing = _snapshot.getIterator(start, key); + } + + @Override + public JObjectKey peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + var got = _backing.next(); + _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue()))); + return Pair.of(got.getKey(), got.getValue().data()); + } + } + + @Override + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + var got = new ReadTrackingIterator(start, key); + _iterators.add(got); + return got; + } + } +} \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java new file mode 100644 index 00000000..14ee4c3a --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java @@ -0,0 +1,25 @@ +package com.usatiuk.dhfs.objects.transaction; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.Iterator; +import java.util.Map; +import java.util.Optional; + +public interface ReadTrackingTransactionObjectSource extends AutoCloseableNoThrow { + Optional get(Class type, JObjectKey key); + + Optional getWriteLocked(Class type, JObjectKey key); + + Iterator> getIterator(IteratorStart start, JObjectKey key); + + default Iterator> getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + + Map> getRead(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index e78525a9..0d295511 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -2,15 +2,16 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.Collection; +import java.util.Iterator; import java.util.Optional; // The transaction interface actually used by user code to retrieve objects public interface Transaction extends TransactionHandle { - long getId(); - void onCommit(Runnable runnable); Optional get(Class type, JObjectKey key, LockingStrategy strategy); @@ -25,4 +26,11 @@ public interface Transaction extends TransactionHandle { default Optional get(Class type, JObjectKey key) { return get(type, key, LockingStrategy.OPTIMISTIC); } + + Iterator> getIterator(IteratorStart start, JObjectKey key); + + default Iterator> getIterator(JObjectKey key) { + return getIterator(IteratorStart.GE, key); + } + } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java 
index eea5cfc5..c4007d69 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java @@ -1,5 +1,5 @@ package com.usatiuk.dhfs.objects.transaction; public interface TransactionFactory { - TransactionPrivate createTransaction(long id, TransactionObjectSource source); + TransactionPrivate createTransaction(long snapshotId); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index cb5a4ccb..846b6567 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,11 +1,12 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JDataVersionedWrapper; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.WritebackObjectPersistentStore; +import com.usatiuk.dhfs.objects.SnapshotManager; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.*; @@ -13,28 +14,26 @@ import java.util.*; @ApplicationScoped public class TransactionFactoryImpl implements TransactionFactory { @Inject - WritebackObjectPersistentStore store; // FIXME: + SnapshotManager snapshotManager; + @Inject + ReadTrackingObjectSourceFactory readTrackingObjectSourceFactory; @Override - public TransactionPrivate createTransaction(long id, TransactionObjectSource source) { - return new TransactionImpl(id, source); + public TransactionPrivate createTransaction(long snapshotId) { + return new TransactionImpl(snapshotId); } private class TransactionImpl implements TransactionPrivate { - private final long _id; - private final ReadTrackingObjectSource _source; + private final ReadTrackingTransactionObjectSource _source; private final Map> _writes = new HashMap<>(); private Map> _newWrites = new HashMap<>(); private final List _onCommit = new ArrayList<>(); private final List _onFlush = new ArrayList<>(); + private final SnapshotManager.Snapshot _snapshot; - private TransactionImpl(long id, TransactionObjectSource source) { - _id = id; - _source = new ReadTrackingObjectSource(source); - } - - public long getId() { - return _id; + private TransactionImpl(long snapshotId) { + _snapshot = snapshotManager.createSnapshot(snapshotId); + _source = readTrackingObjectSourceFactory.create(_snapshot); } @Override @@ -52,6 +51,11 @@ public class TransactionFactoryImpl implements TransactionFactory { return Collections.unmodifiableCollection(_onCommit); } + @Override + public SnapshotManager.Snapshot snapshot() { + return _snapshot; + } + @Override public Collection getOnFlush() { return Collections.unmodifiableCollection(_onFlush); @@ -61,11 +65,7 @@ public class TransactionFactoryImpl implements TransactionFactory { public Optional get(Class type, JObjectKey key, LockingStrategy strategy) { switch (_writes.get(key)) { case TxRecord.TxObjectRecordWrite write -> { - if (type.isInstance(write.data())) { - return Optional.of((T) write.data()); - } else { - throw new IllegalStateException("Type mismatch for " + key + ": expected " 
+ type + ", got " + write.data().getClass()); - } + return Optional.of(type.cast(write.data())); } case TxRecord.TxObjectRecordDeleted deleted -> { return Optional.empty(); @@ -75,45 +75,38 @@ public class TransactionFactoryImpl implements TransactionFactory { } return switch (strategy) { - case OPTIMISTIC -> (Optional) _source.get(type, key).data().map(JDataVersionedWrapper::data); - case WRITE -> (Optional) _source.getWriteLocked(type, key).data().map(JDataVersionedWrapper::data); + case OPTIMISTIC -> _source.get(type, key); + case WRITE -> _source.getWriteLocked(type, key); }; } @Override public void delete(JObjectKey key) { -// get(JData.class, key, LockingStrategy.OPTIMISTIC); - - // FIXME var got = _writes.get(key); if (got != null) { - switch (got) { - case TxRecord.TxObjectRecordDeleted deleted -> { - return; - } - default -> { - } + if (got instanceof TxRecord.TxObjectRecordDeleted) { + return; } } -// -// var read = _source.get(JData.class, key).orElse(null); -// if (read == null) { -// return; -// } - _writes.put(key, new TxRecord.TxObjectRecordDeleted(key)); // FIXME: + + _writes.put(key, new TxRecord.TxObjectRecordDeleted(key)); _newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key)); } @Nonnull @Override public Collection findAllObjects() { - return store.findAllObjects(); +// return store.findAllObjects(); + return List.of(); + } + + @Override + public Iterator> getIterator(IteratorStart start, JObjectKey key) { + return _source.getIterator(start, key); } @Override public void put(JData obj) { -// get(JData.class, obj.getKey(), LockingStrategy.OPTIMISTIC); - _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj)); } @@ -131,9 +124,14 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public ReadTrackingObjectSource readSource() { + public ReadTrackingTransactionObjectSource readSource() { return _source; } - } + @Override + public void close() { + _source.close(); + _snapshot.close(); + } + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java index d55ee1ea..262798c6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionHandle.java @@ -1,7 +1,5 @@ package com.usatiuk.dhfs.objects.transaction; public interface TransactionHandle { - long getId(); - void onFlush(Runnable runnable); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java deleted file mode 100644 index 7fa8b516..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionObjectSource.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.usatiuk.dhfs.objects.transaction; - -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; - -public interface TransactionObjectSource { - TransactionObject get(Class type, JObjectKey key); - - TransactionObject getWriteLocked(Class type, JObjectKey key); -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java 
index 1de3b1d8..7a3c0705 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -1,17 +1,21 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.SnapshotManager; +import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import java.util.Collection; import java.util.Map; // The transaction interface actually used by user code to retrieve objects -public interface TransactionPrivate extends Transaction, TransactionHandlePrivate { +public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow { Collection> drainNewWrites(); Map> reads(); - ReadTrackingObjectSource readSource(); + ReadTrackingTransactionObjectSource readSource(); Collection getOnCommit(); + + SnapshotManager.Snapshot snapshot(); } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java new file mode 100644 index 00000000..6166bb8c --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -0,0 +1,105 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; + +public class MergingKvIteratorTest { + + private class SimpleIteratorWrapper, V> implements CloseableKvIterator { + private final Iterator> _iterator; + private Pair _next; + + public SimpleIteratorWrapper(Iterator> iterator) { + _iterator = iterator; + fillNext(); + } + + private void fillNext() { + while (_iterator.hasNext() && _next == null) { + _next = _iterator.next(); + } + } + + @Override + public K peekNextKey() { + if (_next == null) { + throw new NoSuchElementException(); + } + return _next.getKey(); + } + + @Override + public void close() { + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair next() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; + _next = null; + fillNext(); + return ret; + } + } + + @Test + public void testTestIterator() { + var list = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)); + var iterator = new SimpleIteratorWrapper<>(list.iterator()); + var realIterator = list.iterator(); + while (realIterator.hasNext()) { + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(realIterator.next(), iterator.next()); + } + Assertions.assertFalse(iterator.hasNext()); + + var emptyList = List.>of(); + var emptyIterator = new SimpleIteratorWrapper<>(emptyList.iterator()); + Assertions.assertFalse(emptyIterator.hasNext()); + } + + @Test + public void testSimple() { + var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator(); + var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator(); + var mergingIterator = new MergingKvIterator<>(new SimpleIteratorWrapper<>(source1), new SimpleIteratorWrapper<>(source2)); + var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, 
mergingIterator.next()); + } + } + + @Test + public void testPriority() { + var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); + var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); + var mergingIterator = new MergingKvIterator<>(new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); + var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>(new SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); + var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 1c3db657..6036cad9 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -8,6 +8,7 @@ import io.quarkus.test.junit.QuarkusTest; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -52,7 +53,7 @@ public class ObjectsTest { }); } - @Test + @RepeatedTest(100) void createDeleteObject() { txm.run(() -> { var newParent = new Parent(JObjectKey.of("ParentCreateDeleteObject"), "John"); @@ -237,13 +238,7 @@ public class ObjectsTest { return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); }); - // It is possible that thread 2 did get the object after thread 1 committed it, so there is no conflict - Assertions.assertTrue(!thread1Failed.get() || !thread2Failed.get()); - - if (strategy.equals(LockingStrategy.WRITE)) { - if (!thread1Failed.get()) - Assertions.assertFalse(thread2Failed.get()); - } + Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get()); if (!thread1Failed.get()) { if (!thread2Failed.get()) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java index 8ff36c18..6e48ea94 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteTransaction.java @@ -23,10 +23,6 @@ public class RemoteTransaction { @Inject PersistentPeerDataService persistentPeerDataService; - public long getId() { - return curTx.getId(); - } - private Optional> tryDownloadRemote(RemoteObjectMeta obj) { MutableObject> success = new MutableObject<>(null); diff --git a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java index a45d0b9a..1292e235 100644 --- a/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java +++ b/dhfs-parent/utils/src/main/java/com/usatiuk/dhfs/utils/DataLocker.java @@ -60,6 +60,8 @@ 
public class DataLocker { @Override public void close() { synchronized (_tag) { + if (_tag.released) + return; _tag.released = true; // Notify all because when the object is locked again, // it's a different lock tag From f5c815f02ab5ed45ad1d89addfbd840867e8b9b8 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 18:35:12 +0100 Subject: [PATCH 076/105] somewhat even more working transactions 3.0? --- .../usatiuk/dhfs/objects/JObjectManager.java | 8 +- .../usatiuk/dhfs/objects/SnapshotManager.java | 102 +++++++++- .../dhfs/objects/TransactionManager.java | 20 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 190 ++++++++++++++++++ 4 files changed, 304 insertions(+), 16 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 5c0aa18b..0a563412 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -173,11 +173,6 @@ public class JObjectManager { Log.trace("Checking dependency " + read.getKey() + " - ok with read"); } - Log.tracef("Flushing transaction %d to storage", newId); - - var realNewId = _txCounter.getAndIncrement() + 1; - assert realNewId == newId; - Log.tracef("Committing transaction %d to storage", newId); var addFlushCallback = snapshotManager.commitTx( writes.values().stream() @@ -193,6 +188,9 @@ public class JObjectManager { }).toList(), newId); + var realNewId = _txCounter.getAndIncrement() + 1; + assert realNewId == newId; + for (var callback : tx.getOnCommit()) { callback.run(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 75653f01..5f5f2c1d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -25,6 +25,9 @@ public class SnapshotManager { private interface SnapshotEntry { } + private record SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { + } + private record SnapshotEntryObject(JDataVersionedWrapper data) implements SnapshotEntry { } @@ -47,6 +50,7 @@ public class SnapshotManager { private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); private final MultiValuedMap _snapshotBounds = new HashSetValuedHashMap<>(); private final HashMap _snapshotRefCounts = new HashMap<>(); + private final ConcurrentSkipListMap _snapshotVersions = new ConcurrentSkipListMap<>(); private void verify() { assert _snapshotIds.isEmpty() == (_lastAliveSnapshotId == -1); @@ -55,15 +59,20 @@ public class SnapshotManager { Consumer commitTx(Collection> writes, long id) { synchronized (this) { + assert id > _lastSnapshotId; if (!_snapshotIds.isEmpty()) { verify(); + boolean hadBackward = false; for (var action : writes) { var current = delegateStore.readObjectVerbose(action.key()); Pair newSnapshotEntry = switch (current) { case WritebackObjectPersistentStore.VerboseReadResultPersisted( Optional data - ) -> Pair.of(new SnapshotKey(action.key(), _snapshotIds.peek()), - data.map(SnapshotEntryObject::new).orElse(new SnapshotEntryDeleted())); + ) -> { + hadBackward = true; + yield Pair.of(new SnapshotKey(action.key(), _snapshotIds.peek()), + data.map(o -> new SnapshotEntryRead(o, id)).orElse(new 
SnapshotEntryDeleted())); + } case WritebackObjectPersistentStore.VerboseReadResultPending( TxWriteback.PendingWriteEntry pending ) -> switch (pending) { @@ -79,6 +88,11 @@ public class SnapshotManager { _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); _snapshotBounds.put(newSnapshotEntry.getLeft().version(), newSnapshotEntry.getLeft()); } + + if (hadBackward) + for (var sid : _snapshotIds) { + _snapshotVersions.merge(sid, 1L, Long::sum); + } } verify(); @@ -96,10 +110,22 @@ public class SnapshotManager { long curCount; long curId = id; + long nextId; do { _snapshotIds.poll(); + _snapshotVersions.remove(curId); + nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); for (var key : _snapshotBounds.remove(curId)) { + var entry = _objects.get(key); + if (entry instanceof SnapshotEntryRead read) { + if (curId != read.whenToRemove() - 1) { + assert nextId != -1; + if (nextId < read.whenToRemove()) { + _objects.put(new SnapshotKey(key.key(), nextId), entry); + } + } + } _objects.remove(key); } @@ -137,6 +163,7 @@ public class SnapshotManager { _lastAliveSnapshotId = id; _snapshotIds.add(id); _snapshotRefCounts.merge(id, 1L, Long::sum); + _snapshotVersions.put(id, 0L); verify(); } var closedRef = _closed; @@ -169,6 +196,8 @@ public class SnapshotManager { _next = switch (next.getValue()) { case SnapshotEntryObject(JDataVersionedWrapper data) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); + case SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); case SnapshotEntryDeleted() -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); @@ -206,8 +235,75 @@ public class SnapshotManager { } + public class AutoRefreshingSnapshotKvIterator implements CloseableKvIterator { + private CloseableKvIterator _backing; + private long _lastRefreshed = -1L; + private Pair _next; + + public AutoRefreshingSnapshotKvIterator(IteratorStart start, JObjectKey key) { + synchronized (SnapshotManager.this) { + long curVersion = _snapshotVersions.get(_id); + _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), delegateStore.getIterator(start, key)); + _next = _backing.hasNext() ? _backing.next() : null; + _lastRefreshed = curVersion; + } + } + + private void doRefresh() { + long curVersion = _snapshotVersions.get(_id); + if (curVersion == _lastRefreshed) { + return; + } + if (_next == null) return; + synchronized (SnapshotManager.this) { + curVersion = _snapshotVersions.get(_id); + _backing.close(); + _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), delegateStore.getIterator(IteratorStart.GE, _next.getKey())); + var next = _backing.hasNext() ? 
_backing.next() : null; + assert next != null; + assert next.equals(_next); + _next = next; + _lastRefreshed = curVersion; + } + } + + private void prepareNext() { + doRefresh(); + if (_backing.hasNext()) { + _next = _backing.next(); + } else { + _next = null; + } + } + + @Override + public JObjectKey peekNextKey() { + return _next.getKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair next() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; + prepareNext(); + return ret; + } + } + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), delegateStore.getIterator(start, key)); + return new AutoRefreshingSnapshotKvIterator(start, key); } public CloseableKvIterator getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java index 754858f0..2fe54390 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TransactionManager.java @@ -46,10 +46,15 @@ public interface TransactionManager { } } - default void runTries(VoidFn fn, int tries) { + default TransactionHandle runTries(VoidFn fn, int tries) { if (current() != null) { fn.apply(); - return; + return new TransactionHandle() { + @Override + public void onFlush(Runnable runnable) { + current().onCommit(runnable); + } + }; } begin(); @@ -61,25 +66,24 @@ public interface TransactionManager { Log.error("Transaction commit failed", txCommitException); throw txCommitException; } - runTries(fn, tries - 1); - return; + return runTries(fn, tries - 1); } catch (Throwable e) { rollback(); throw e; } try { - commit(); + return commit(); } catch (TxCommitException txCommitException) { if (tries == 0) { Log.error("Transaction commit failed", txCommitException); throw txCommitException; } - runTries(fn, tries - 1); + return runTries(fn, tries - 1); } } - default void run(VoidFn fn) { - runTries(fn, 10); + default TransactionHandle run(VoidFn fn) { + return runTries(fn, 10); } default T run(Supplier supplier) { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 6036cad9..7d9ecab1 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -13,8 +13,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; @QuarkusTest @@ -25,6 +28,17 @@ public class ObjectsTest { @Inject Transaction curTx; + private void deleteAndCheck(JObjectKey key) { + txm.run(() -> { + curTx.delete(key); + }); + + txm.run(() -> { + var parent = curTx.get(JData.class, key).orElse(null); + Assertions.assertNull(parent); + }); + } + @Test void createObject() { txm.run(() -> { @@ -252,6 +266,182 @@ public class 
ObjectsTest { } + + @RepeatedTest(100) + void snapshotTest1() { + var key = "SnapshotTest1"; + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John")); + Log.info("Thread 2 committing"); + }); + Log.info("Thread 2 committed"); + try { + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertTrue(curTx.get(Parent.class, new JObjectKey(key)).isEmpty()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + + @RepeatedTest(100) + void snapshotTest2() { + var key = "SnapshotTest2"; + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + }); + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John2")); + Log.info("Thread 2 committing"); + }); + Log.info("Thread 2 committed"); + try { + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + + @RepeatedTest(100) + void snapshotTest3() throws InterruptedException { + var key = "SnapshotTest3"; + var barrier0 = new CountDownLatch(1); + var barrier1 = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + }).onFlush(barrier0::countDown); + barrier0.await(); + try (ExecutorService ex = Executors.newFixedThreadPool(3)) { + ex.invokeAll(List.of( + () -> { + barrier1.await(); + Log.info("Thread 2 starting tx"); + txm.run(() -> { + Log.info("Thread 2 started tx"); + curTx.put(new Parent(JObjectKey.of(key), "John2")); + Log.info("Thread 2 committing"); + }); + Log.info("Thread 2 committed"); + try { +
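// Thread 1 opened its transaction (and therefore its snapshot) before this commit; release it only now, so its read below must be served from the old snapshot state.
+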
barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + return null; + }, + () -> { + Log.info("Thread 1 starting tx"); + txm.run(() -> { + try { + Log.info("Thread 1 started tx"); + barrier1.await(); + barrier2.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + Log.info("Thread 1 reading"); + Assertions.assertEquals("John", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + Log.info("Thread 1 done reading"); + }); + Log.info("Thread 1 finished"); + return null; + } + )).forEach(f -> { + try { + f.get(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + txm.run(() -> { + Assertions.assertEquals("John2", curTx.get(Parent.class, new JObjectKey(key)).orElseThrow().name()); + }); + deleteAndCheck(new JObjectKey(key)); + } + // } // // @Test From dfa2fe78bd67a81bd5401b3aed60f3bd8c4b3f2d Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 18:44:28 +0100 Subject: [PATCH 077/105] retry creating snapshot --- .../com/usatiuk/dhfs/objects/JObjectManager.java | 7 ++++++- .../com/usatiuk/dhfs/objects/SnapshotManager.java | 13 ++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 0a563412..690f9903 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -50,7 +50,12 @@ public class JObjectManager { public TransactionPrivate createTransaction() { verifyReady(); - return transactionFactory.createTransaction(_txCounter.get()); + while (true) { + try { + return transactionFactory.createTransaction(_txCounter.get()); + } catch (SnapshotManager.IllegalSnapshotIdException ignored) { + } + } } public TransactionHandle commit(TransactionPrivate tx) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 5f5f2c1d..38e61bf0 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -143,6 +143,17 @@ public class SnapshotManager { } } + public static class IllegalSnapshotIdException extends IllegalArgumentException { + public IllegalSnapshotIdException(String message) { + super(message); + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } + } + public class Snapshot implements AutoCloseableNoThrow { private final long _id; private static final Cleaner CLEANER = Cleaner.create(); @@ -157,7 +168,7 @@ public class SnapshotManager { synchronized (SnapshotManager.this) { verify(); if (_lastSnapshotId > id) - throw new IllegalArgumentException("Snapshot id less than last? 
" + id + " vs " + _lastSnapshotId); + throw new IllegalSnapshotIdException("Snapshot id " + id + " is less than last snapshot id " + _lastSnapshotId); _lastSnapshotId = id; if (_lastAliveSnapshotId == -1) _lastAliveSnapshotId = id; From 36bc7eea4059e8a853f67bf6a630e6809af8151d Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 19:54:05 +0100 Subject: [PATCH 078/105] passing tests --- .../usatiuk/dhfs/objects/JObjectManager.java | 4 +- .../usatiuk/dhfs/objects/SnapshotManager.java | 84 ++++++++++++++----- .../LmdbObjectPersistentStore.java | 3 + .../transaction/TransactionFactoryImpl.java | 2 + .../files/DhfsFileServiceSimpleTestImpl.java | 10 ++- 5 files changed, 77 insertions(+), 26 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 690f9903..6e90f885 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -52,7 +52,9 @@ public class JObjectManager { verifyReady(); while (true) { try { - return transactionFactory.createTransaction(_txCounter.get()); + var tx = transactionFactory.createTransaction(_txCounter.get()); + Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id()); + return tx; } catch (SnapshotManager.IllegalSnapshotIdException ignored) { } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 38e61bf0..9e9dafe2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -23,15 +23,37 @@ public class SnapshotManager { WritebackObjectPersistentStore delegateStore; private interface SnapshotEntry { + long whenToRemove(); + + SnapshotEntry withWhenToRemove(long whenToRemove); } private record SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntry withWhenToRemove(long whenToRemove) { + return new SnapshotEntryRead(data, whenToRemove); + } } - private record SnapshotEntryObject(JDataVersionedWrapper data) implements SnapshotEntry { + private record SnapshotEntryReadEmpty(long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntry withWhenToRemove(long whenToRemove) { + return new SnapshotEntryReadEmpty(whenToRemove); + } } - private record SnapshotEntryDeleted() implements SnapshotEntry { + private record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntry withWhenToRemove(long whenToRemove) { + return new SnapshotEntryObject(data, whenToRemove); + } + } + + private record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntry withWhenToRemove(long whenToRemove) { + return new SnapshotEntryDeleted(whenToRemove); + } } private record SnapshotKey(JObjectKey key, long version) implements Comparable { @@ -71,20 +93,25 @@ public class SnapshotManager { ) -> { hadBackward = true; yield Pair.of(new SnapshotKey(action.key(), _snapshotIds.peek()), - data.map(o -> new SnapshotEntryRead(o, id)).orElse(new SnapshotEntryDeleted())); + data.map(o -> new SnapshotEntryRead(o, id)).orElse(new SnapshotEntryReadEmpty(id))); } case 
WritebackObjectPersistentStore.VerboseReadResultPending( TxWriteback.PendingWriteEntry pending - ) -> switch (pending) { - case TxWriteback.PendingWrite write -> - Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data())); - case TxWriteback.PendingDelete delete -> - Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted()); - default -> throw new IllegalStateException("Unexpected value: " + pending); - }; + ) -> { + assert pending.bundleId() < id; + yield switch (pending) { + case TxWriteback.PendingWrite write -> + Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), write.bundleId())); + case TxWriteback.PendingDelete delete -> + Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(delete.bundleId())); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + } default -> throw new IllegalStateException("Unexpected value: " + current); }; + Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); + _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); _snapshotBounds.put(newSnapshotEntry.getLeft().version(), newSnapshotEntry.getLeft()); } @@ -101,6 +128,7 @@ public class SnapshotManager { } private void unrefSnapshot(long id) { + Log.tracev("Unref snapshot {0}", id); synchronized (this) { verify(); var refCount = _snapshotRefCounts.merge(id, -1L, (a, b) -> a + b == 0 ? null : a + b); @@ -112,19 +140,23 @@ public class SnapshotManager { long curId = id; long nextId; do { + Log.tracev("Removing snapshot {0}", curId); _snapshotIds.poll(); _snapshotVersions.remove(curId); nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); for (var key : _snapshotBounds.remove(curId)) { var entry = _objects.get(key); - if (entry instanceof SnapshotEntryRead read) { - if (curId != read.whenToRemove() - 1) { - assert nextId != -1; - if (nextId < read.whenToRemove()) { - _objects.put(new SnapshotKey(key.key(), nextId), entry); - } + if (nextId == -1) { + Log.tracev("Could not find place to place entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}", + entry, curId, nextId, entry.whenToRemove(), _snapshotIds); + } else if (nextId < entry.whenToRemove()) { + if (!(entry instanceof SnapshotEntryRead || entry instanceof SnapshotEntryReadEmpty)) { + Log.errorv("Unexpected entry type: {0}, key: {1}, nextId: {2}, whenToRemove: {3}, snapshotIds: {4}", + entry, key, nextId, entry.whenToRemove(), _snapshotIds); + assert false; } + _objects.put(new SnapshotKey(key.key(), nextId), entry); } _objects.remove(key); } @@ -172,9 +204,10 @@ public class SnapshotManager { _lastSnapshotId = id; if (_lastAliveSnapshotId == -1) _lastAliveSnapshotId = id; - _snapshotIds.add(id); - _snapshotRefCounts.merge(id, 1L, Long::sum); - _snapshotVersions.put(id, 0L); + if (_snapshotRefCounts.merge(id, 1L, Long::sum) == 1) { + _snapshotIds.add(id); + _snapshotVersions.put(id, 0L); + } verify(); } var closedRef = _closed; @@ -201,15 +234,17 @@ public class SnapshotManager { var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; while (nextNextKey != null && nextNextKey.key.equals(next.getKey().key()) && nextNextKey.version() <= _id) { next = _backing.next(); - nextNextKey = _backing.peekNextKey(); + nextNextKey = _backing.hasNext() ? 
_backing.peekNextKey() : null; } - if (next.getKey().version() <= _id) { + if (next.getKey().version() <= _id && next.getValue().whenToRemove() > _id) { _next = switch (next.getValue()) { - case SnapshotEntryObject(JDataVersionedWrapper data) -> + case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); case SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); - case SnapshotEntryDeleted() -> + case SnapshotEntryReadEmpty(long whenToRemove) -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); + case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); }; @@ -241,6 +276,7 @@ public class SnapshotManager { var ret = _next; _next = null; fillNext(); + Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; } @@ -268,6 +304,7 @@ public class SnapshotManager { if (_next == null) return; synchronized (SnapshotManager.this) { curVersion = _snapshotVersions.get(_id); + Log.tracev("Refreshing snapshot iterator {0}, last refreshed {1}, current version {2}", _id, _lastRefreshed, curVersion); _backing.close(); _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), delegateStore.getIterator(IteratorStart.GE, _next.getKey())); var next = _backing.hasNext() ? _backing.next() : null; @@ -309,6 +346,7 @@ public class SnapshotManager { } var ret = _next; prepareNext(); + Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index 9bb3b38a..19a5fd6c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -139,6 +139,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { } } } + + Log.tracev("got: {0}, hasNext: {1}", got, _hasNext); } @Override @@ -163,6 +165,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { } var ret = Pair.of(JObjectKey.fromBytes(_cursor.key()), ByteString.copyFrom(_cursor.val())); _hasNext = _cursor.next(); + Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext); return ret; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 846b6567..85025fc6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -4,6 +4,7 @@ import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.SnapshotManager; import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; @@ -20,6 +21,7 @@ public class TransactionFactoryImpl 
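// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// The JObjectManager change earlier in this patch retries transaction creation
// when the snapshot id it read has already been retired between reading the
// counter and registering the snapshot. The retry shape, reduced to its
// essentials (all types and names here are invented for illustration):
class RetryDemo {
    static class StaleSnapshotException extends RuntimeException {}

    interface TxFactory { Object create(long snapshotId); }

    static Object begin(TxFactory factory, java.util.function.LongSupplier counter) {
        while (true) {
            try {
                return factory.create(counter.getAsLong());
            } catch (StaleSnapshotException ignored) {
                // Raced with a commit that retired this snapshot id; retry.
            }
        }
    }
}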
implements TransactionFactory { @Override public TransactionPrivate createTransaction(long snapshotId) { + Log.tracev("Trying to create transaction with snapshotId={0}", snapshotId); return new TransactionImpl(snapshotId); } diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java index 6fa33055..aa2a0da0 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -10,6 +10,7 @@ import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.kleppmanntree.AlreadyExistsException; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import java.util.Map; @@ -94,13 +95,15 @@ public class DhfsFileServiceSimpleTestImpl { // } // } - @Test + @RepeatedTest(100) void dontMkdirTwiceTest() { Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777)); Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777)); + fileService.unlink("/dontMkdirTwiceTest"); + Assertions.assertFalse(fileService.open("/dontMkdirTwiceTest").isPresent()); } - @Test + @RepeatedTest(100) void writeTest() { var ret = fileService.create("/writeTest", 777); Assertions.assertTrue(ret.isPresent()); @@ -117,6 +120,9 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); fileService.write(uuid, 3, new byte[]{17, 18}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); + + fileService.unlink("/writeTest"); + Assertions.assertFalse(fileService.open("/writeTest").isPresent()); } @Test From 92004a8163c33532377431f71b24ddf602018a98 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 20:25:57 +0100 Subject: [PATCH 079/105] snapshot leak fix --- .../usatiuk/dhfs/objects/SnapshotManager.java | 50 +++++++++++++------ 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 9e9dafe2..9a74ee6d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -6,8 +6,6 @@ import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.collections4.MultiValuedMap; -import org.apache.commons.collections4.multimap.HashSetValuedHashMap; import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; @@ -70,7 +68,7 @@ public class SnapshotManager { private final Queue _snapshotIds = new ArrayDeque<>(); private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); - private final MultiValuedMap _snapshotBounds = new HashSetValuedHashMap<>(); + private final TreeMap> _snapshotBounds = new TreeMap<>(); private final HashMap _snapshotRefCounts = new HashMap<>(); private final ConcurrentSkipListMap 
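// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// PATCH 079 replaces the commons-collections MultiValuedMap with a plain
// TreeMap used as a sorted multimap via merge, which also makes headMap
// available for draining all buckets at or below a version. The idiom in
// isolation (demo names invented):
import java.util.ArrayDeque;
import java.util.List;
import java.util.TreeMap;

class MultimapMergeDemo {
    public static void main(String[] args) {
        var bounds = new TreeMap<Long, ArrayDeque<String>>();
        // merge either inserts the fresh deque or appends to the existing one
        bounds.merge(3L, new ArrayDeque<>(List.of("a")), (a, b) -> { a.addAll(b); return a; });
        bounds.merge(3L, new ArrayDeque<>(List.of("b")), (a, b) -> { a.addAll(b); return a; });
        // headMap yields every bucket at or below a version, in order
        System.out.println(bounds.headMap(3L, true)); // {3=[a, b]}
    }
}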
_snapshotVersions = new ConcurrentSkipListMap<>(); @@ -112,14 +110,19 @@ public class SnapshotManager { Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); - _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); - _snapshotBounds.put(newSnapshotEntry.getLeft().version(), newSnapshotEntry.getLeft()); + var val = _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); +// assert val == null; + _snapshotBounds.merge(newSnapshotEntry.getLeft().version(), new ArrayDeque<>(List.of(newSnapshotEntry.getLeft())), + (a, b) -> { + a.addAll(b); + return a; + }); } - if (hadBackward) - for (var sid : _snapshotIds) { - _snapshotVersions.merge(sid, 1L, Long::sum); - } +// if (hadBackward) + for (var sid : _snapshotIds) { + _snapshotVersions.merge(sid, 1L, Long::sum); + } } verify(); @@ -145,21 +148,36 @@ public class SnapshotManager { _snapshotVersions.remove(curId); nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); - for (var key : _snapshotBounds.remove(curId)) { + var keys = _snapshotBounds.headMap(curId, true); + + long finalCurId = curId; + long finalNextId = nextId; + keys.values().stream().flatMap(Collection::stream).forEach(key -> { var entry = _objects.get(key); - if (nextId == -1) { + if (entry == null) { +// Log.warnv("Entry not found for key {0}", key); + return; + } + if (finalNextId == -1) { Log.tracev("Could not find place to place entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}", - entry, curId, nextId, entry.whenToRemove(), _snapshotIds); - } else if (nextId < entry.whenToRemove()) { + entry, finalCurId, finalNextId, entry.whenToRemove(), _snapshotIds); + } else if (finalNextId < entry.whenToRemove()) { if (!(entry instanceof SnapshotEntryRead || entry instanceof SnapshotEntryReadEmpty)) { Log.errorv("Unexpected entry type: {0}, key: {1}, nextId: {2}, whenToRemove: {3}, snapshotIds: {4}", - entry, key, nextId, entry.whenToRemove(), _snapshotIds); + entry, key, finalNextId, entry.whenToRemove(), _snapshotIds); assert false; } - _objects.put(new SnapshotKey(key.key(), nextId), entry); + _objects.put(new SnapshotKey(key.key(), finalNextId), entry); + _snapshotBounds.merge(finalNextId, new ArrayDeque<>(List.of(new SnapshotKey(key.key(), finalNextId))), + (a, b) -> { + a.addAll(b); + return a; + }); } _objects.remove(key); - } + }); + + keys.clear(); if (_snapshotIds.isEmpty()) { _lastAliveSnapshotId = -1; From fbc9336e8d3596d06d0cf7930afbb78d08d57639 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 20:51:17 +0100 Subject: [PATCH 080/105] caching fix --- .../dhfs/objects/MappingKvIterator.java | 37 +++++++++++++++++++ .../usatiuk/dhfs/objects/TxWritebackImpl.java | 2 +- .../WritebackObjectPersistentStore.java | 2 +- .../CachingObjectPersistentStore.java | 36 +++++++++++++++++- 4 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java new file mode 100644 index 00000000..d980d359 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java @@ -0,0 +1,37 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +import java.util.function.Function; + +public class MappingKvIterator, V, V_T> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + 
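// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// MappingKvIterator, introduced just below, is a thin decorator: keys pass
// through unchanged, values go through a Function. The same shape over a
// plain java.util.Iterator, for reference (demo names invented):
import java.util.Iterator;
import java.util.Map;
import java.util.function.Function;

class MappingIteratorDemo<K, V, R> implements Iterator<Map.Entry<K, R>> {
    private final Iterator<Map.Entry<K, V>> backing;
    private final Function<V, R> fn;

    MappingIteratorDemo(Iterator<Map.Entry<K, V>> backing, Function<V, R> fn) {
        this.backing = backing;
        this.fn = fn;
    }

    @Override public boolean hasNext() { return backing.hasNext(); }

    @Override public Map.Entry<K, R> next() {
        var e = backing.next();
        // transform lazily, one element at a time
        return Map.entry(e.getKey(), fn.apply(e.getValue()));
    }
}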
private final Function _transformer; + + public MappingKvIterator(CloseableKvIterator backing, Function transformer) { + _backing = backing; + _transformer = transformer; + } + + @Override + public K peekNextKey() { + return _backing.peekNextKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasNext(); + } + + @Override + public Pair next() { + var got = _backing.next(); + return Pair.of(got.getKey(), _transformer.apply(got.getValue())); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 8d4a6077..3fd67c60 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -380,7 +380,7 @@ public class TxWritebackImpl implements TxWriteback { // Does not have to guarantee consistent view, snapshots are handled by upper layers @Override public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { - return new PredicateKvIterator<>( + return new MappingKvIterator<>( new NavigableMapKvIterator<>(_pendingWrites, start, key), e -> switch (e) { case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index da690eff..a373750b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -98,7 +98,7 @@ public class WritebackObjectPersistentStore { // Does not have to guarantee consistent view, snapshots are handled by upper layers public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { return new MergingKvIterator<>(txWriteback.getIterator(start, key), - new PredicateKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)); + new MappingKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)); } public CloseableKvIterator> getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 915bb034..1a208be9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -59,6 +59,7 @@ public class CachingObjectPersistentStore { } private void put(JObjectKey key, Optional obj) { +// Log.tracev("Adding {0} to cache: {1}", key, obj); synchronized (_cache) { int size = obj.map(o -> o.data().estimateSize()).orElse(0); @@ -103,11 +104,43 @@ public class CachingObjectPersistentStore { _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); _cache.remove(key); _sortedCache.remove(key); +// Log.tracev("Removing {0} from cache", key); } } delegate.commitTx(names); } + + private class CachingKvIterator implements CloseableKvIterator { + private final CloseableKvIterator _delegate; + + private CachingKvIterator(CloseableKvIterator 
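// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// CachingKvIterator, being defined here, promotes every element it returns
// into the read cache as a side effect of next(). That side effect is why a
// later patch warns against calling next() while objects are still in
// writeback. Reduced to a JDK-only shape (demo names invented):
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CachingIteratorDemo<K, V> implements Iterator<Map.Entry<K, V>> {
    private final Iterator<Map.Entry<K, V>> delegate;
    private final ConcurrentHashMap<K, V> cache;

    CachingIteratorDemo(Iterator<Map.Entry<K, V>> delegate, ConcurrentHashMap<K, V> cache) {
        this.delegate = delegate;
        this.cache = cache;
    }

    @Override public boolean hasNext() { return delegate.hasNext(); }

    @Override public Map.Entry<K, V> next() {
        var next = delegate.next();
        cache.put(next.getKey(), next.getValue()); // populate cache on the way through
        return next;
    }
}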
delegate) { + _delegate = delegate; + } + + @Override + public JObjectKey peekNextKey() { + return _delegate.peekNextKey(); + } + + @Override + public void close() { + _delegate.close(); + } + + @Override + public boolean hasNext() { + return _delegate.hasNext(); + } + + @Override + public Pair next() { + var next = _delegate.next(); + put(next.getKey(), Optional.of(next.getValue())); + return next; + } + } + // Returns an iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { @@ -115,8 +148,7 @@ public class CachingObjectPersistentStore { new PredicateKvIterator<>( new NavigableMapKvIterator<>(_sortedCache, start, key), e -> e.object().orElse(null) - ), - delegate.getIterator(start, key)); // TODO: Doesn't work + ), new CachingKvIterator(delegate.getIterator(start, key))); } public CloseableKvIterator getIterator(JObjectKey key) { From 0976a9367559512756d8e94e45fb0496a9df3600 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 20:58:06 +0100 Subject: [PATCH 081/105] add a log --- .../com/usatiuk/dhfs/objects/SnapshotManager.java | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 9a74ee6d..ec24b4b2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -326,8 +326,18 @@ public class SnapshotManager { _backing.close(); _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), delegateStore.getIterator(IteratorStart.GE, _next.getKey())); var next = _backing.hasNext() ? _backing.next() : null; - assert next != null; - assert next.equals(_next); + boolean fail = false; + if (next == null) { + Log.errorv("Failed to refresh snapshot iterator, null {0}, last refreshed {1}," + + " current version {2}, current value {3}", _id, _lastRefreshed, curVersion, next); + fail = true; + } else if (!next.equals(_next)) { + Log.errorv("Failed to refresh snapshot iterator, mismatch {0}, last refreshed {1}," + + " current version {2}, current value {3}, read value {4}", _id, _lastRefreshed, curVersion, _next, next); + fail = true; + } + + assert !fail; _next = next; _lastRefreshed = curVersion; } From fa76828d0452f51d81e6666c72eb4435f4d9903c Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 21:28:47 +0100 Subject: [PATCH 082/105] more fixes --- .../java/com/usatiuk/dhfs/objects/SnapshotManager.java | 10 +++++++++- .../persistence/CachingObjectPersistentStore.java | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index ec24b4b2..0f503b78 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -254,7 +254,8 @@ public class SnapshotManager { next = _backing.next(); nextNextKey = _backing.hasNext() ? 
_backing.peekNextKey() : null; } - if (next.getKey().version() <= _id && next.getValue().whenToRemove() > _id) { + // next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx + if (next.getKey().version() <= _id && next.getValue().whenToRemove() >= _id) { _next = switch (next.getValue()) { case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); @@ -310,6 +311,8 @@ public class SnapshotManager { long curVersion = _snapshotVersions.get(_id); _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), delegateStore.getIterator(start, key)); _next = _backing.hasNext() ? _backing.next() : null; + if (_next != null) + assert _next.getValue().version() <= _id; _lastRefreshed = curVersion; } } @@ -347,6 +350,7 @@ public class SnapshotManager { doRefresh(); if (_backing.hasNext()) { _next = _backing.next(); + assert _next.getValue().version() <= _id; } else { _next = null; } @@ -354,6 +358,9 @@ public class SnapshotManager { @Override public JObjectKey peekNextKey() { + if (_next == null) { + throw new NoSuchElementException(); + } return _next.getKey(); } @@ -373,6 +380,7 @@ public class SnapshotManager { throw new NoSuchElementException("No more elements"); } var ret = _next; + assert ret.getValue().version() <= _id; prepareNext(); Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 1a208be9..4126f743 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -143,6 +143,8 @@ public class CachingObjectPersistentStore { // Returns an iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers + // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, + // if some objects are still in writeback public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { return new MergingKvIterator<>( new PredicateKvIterator<>( From b12606f9f45ba9dcd335c5831e231cf35f608d08 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 22:16:03 +0100 Subject: [PATCH 083/105] more fixes 2 --- .../usatiuk/dhfs/objects/SnapshotManager.java | 56 ++++++++++++++----- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 0f503b78..778c1306 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -85,12 +85,15 @@ public class SnapshotManager { boolean hadBackward = false; for (var action : writes) { var current = delegateStore.readObjectVerbose(action.key()); + // Add to snapshot the previous visible version of the replaced object + // I.e. 
should be visible to all transactions with id <= id + // and at least as its corresponding version Pair newSnapshotEntry = switch (current) { case WritebackObjectPersistentStore.VerboseReadResultPersisted( Optional data ) -> { hadBackward = true; - yield Pair.of(new SnapshotKey(action.key(), _snapshotIds.peek()), + yield Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), data.map(o -> new SnapshotEntryRead(o, id)).orElse(new SnapshotEntryReadEmpty(id))); } case WritebackObjectPersistentStore.VerboseReadResultPending( @@ -99,15 +102,22 @@ public class SnapshotManager { assert pending.bundleId() < id; yield switch (pending) { case TxWriteback.PendingWrite write -> - Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), write.bundleId())); + Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), id)); case TxWriteback.PendingDelete delete -> - Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(delete.bundleId())); + Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(id)); default -> throw new IllegalStateException("Unexpected value: " + pending); }; } default -> throw new IllegalStateException("Unexpected value: " + current); }; + if (newSnapshotEntry.getValue() instanceof SnapshotEntryRead re) { + assert re.data().version() <= newSnapshotEntry.getKey().version(); + } + if (newSnapshotEntry.getValue() instanceof SnapshotEntryObject re) { + assert re.data().version() <= newSnapshotEntry.getKey().version(); + } + Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); var val = _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); @@ -147,11 +157,16 @@ public class SnapshotManager { _snapshotIds.poll(); _snapshotVersions.remove(curId); nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); + while (nextId == curId) { + _snapshotIds.poll(); + nextId = _snapshotIds.isEmpty() ? -1 : _snapshotIds.peek(); + } var keys = _snapshotBounds.headMap(curId, true); long finalCurId = curId; long finalNextId = nextId; + ArrayList> toReAdd = new ArrayList<>(); keys.values().stream().flatMap(Collection::stream).forEach(key -> { var entry = _objects.get(key); if (entry == null) { @@ -162,21 +177,21 @@ public class SnapshotManager { Log.tracev("Could not find place to place entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}", entry, finalCurId, finalNextId, entry.whenToRemove(), _snapshotIds); } else if (finalNextId < entry.whenToRemove()) { - if (!(entry instanceof SnapshotEntryRead || entry instanceof SnapshotEntryReadEmpty)) { - Log.errorv("Unexpected entry type: {0}, key: {1}, nextId: {2}, whenToRemove: {3}, snapshotIds: {4}", - entry, key, finalNextId, entry.whenToRemove(), _snapshotIds); - assert false; - } _objects.put(new SnapshotKey(key.key(), finalNextId), entry); - _snapshotBounds.merge(finalNextId, new ArrayDeque<>(List.of(new SnapshotKey(key.key(), finalNextId))), - (a, b) -> { - a.addAll(b); - return a; - }); + assert finalNextId > finalCurId; + toReAdd.add(Pair.of(finalNextId, new SnapshotKey(key.key(), finalNextId))); } _objects.remove(key); }); + toReAdd.forEach(p -> { + _snapshotBounds.merge(p.getLeft(), new ArrayDeque<>(List.of(p.getRight())), + (a, b) -> { + a.addAll(b); + return a; + }); + }); + keys.clear(); if (_snapshotIds.isEmpty()) { @@ -255,7 +270,7 @@ public class SnapshotManager { nextNextKey = _backing.hasNext() ? 
_backing.peekNextKey() : null; } // next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx - if (next.getKey().version() <= _id && next.getValue().whenToRemove() >= _id) { + if (next.getKey().version() <= _id && next.getValue().whenToRemove() > _id) { _next = switch (next.getValue()) { case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); @@ -268,6 +283,13 @@ public class SnapshotManager { default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); }; } + if (_next != null) { + if (_next.getValue() instanceof TombstoneMergingKvIterator.Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _id; + } + } } } @@ -293,6 +315,12 @@ public class SnapshotManager { if (_next == null) throw new NoSuchElementException("No more elements"); var ret = _next; + if (ret.getValue() instanceof TombstoneMergingKvIterator.Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _id; + } + _next = null; fillNext(); Log.tracev("Read: {0}, next: {1}", ret, _next); From cde5e44e77f1ef27ff6b433742085621d274b45e Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 22 Feb 2025 22:39:04 +0100 Subject: [PATCH 084/105] more fixes 3 --- .../com/usatiuk/dhfs/objects/JObjectManager.java | 2 +- .../com/usatiuk/dhfs/objects/SnapshotManager.java | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 6e90f885..af500e3a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -139,7 +139,7 @@ public class JObjectManager { Log.trace("Committing transaction start"); // FIXME: Better way? addDependency.accept(JDataDummy.TX_ID_OBJ_NAME); - tx.put(JDataDummy.getInstance()); + writes.put(JDataDummy.TX_ID_OBJ_NAME, new TxRecord.TxObjectRecordWrite<>(JDataDummy.getInstance())); } finally { readSet = tx.reads(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 778c1306..af87f457 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -14,6 +14,7 @@ import java.lang.ref.Cleaner; import java.util.*; import java.util.concurrent.ConcurrentSkipListMap; import java.util.function.Consumer; +import java.util.function.Function; @ApplicationScoped public class SnapshotManager { @@ -334,10 +335,19 @@ public class SnapshotManager { private long _lastRefreshed = -1L; private Pair _next; + private final Function, TombstoneMergingKvIterator.DataType> _downstreamTombstoneMapper + = d -> switch (d) { + case TombstoneMergingKvIterator.Tombstone() -> d; + case TombstoneMergingKvIterator.Data data -> + data.value().version() <= _id ? 
data : new TombstoneMergingKvIterator.Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + d); + }; + public AutoRefreshingSnapshotKvIterator(IteratorStart start, JObjectKey key) { synchronized (SnapshotManager.this) { long curVersion = _snapshotVersions.get(_id); - _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), delegateStore.getIterator(start, key)); + _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), + new MappingKvIterator<>(delegateStore.getIterator(start, key), _downstreamTombstoneMapper)); _next = _backing.hasNext() ? _backing.next() : null; if (_next != null) assert _next.getValue().version() <= _id; @@ -355,7 +365,8 @@ public class SnapshotManager { curVersion = _snapshotVersions.get(_id); Log.tracev("Refreshing snapshot iterator {0}, last refreshed {1}, current version {2}", _id, _lastRefreshed, curVersion); _backing.close(); - _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), delegateStore.getIterator(IteratorStart.GE, _next.getKey())); + _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), + new MappingKvIterator<>(delegateStore.getIterator(IteratorStart.GE, _next.getKey()), _downstreamTombstoneMapper)); var next = _backing.hasNext() ? _backing.next() : null; boolean fail = false; if (next == null) { From c7104e772e64a73aade6191f25bc54917c6d0231 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 10:11:34 +0100 Subject: [PATCH 085/105] slight cleanup --- .../usatiuk/dhfs/objects/SnapshotManager.java | 50 +++---------------- 1 file changed, 8 insertions(+), 42 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index af87f457..48ca0487 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -13,6 +13,7 @@ import javax.annotation.Nonnull; import java.lang.ref.Cleaner; import java.util.*; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.function.Function; @@ -23,36 +24,12 @@ public class SnapshotManager { private interface SnapshotEntry { long whenToRemove(); - - SnapshotEntry withWhenToRemove(long whenToRemove); - } - - private record SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { - @Override - public SnapshotEntry withWhenToRemove(long whenToRemove) { - return new SnapshotEntryRead(data, whenToRemove); - } - } - - private record SnapshotEntryReadEmpty(long whenToRemove) implements SnapshotEntry { - @Override - public SnapshotEntry withWhenToRemove(long whenToRemove) { - return new SnapshotEntryReadEmpty(whenToRemove); - } } private record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { - @Override - public SnapshotEntry withWhenToRemove(long whenToRemove) { - return new SnapshotEntryObject(data, whenToRemove); - } } private record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { - @Override - public SnapshotEntry withWhenToRemove(long whenToRemove) { - return new SnapshotEntryDeleted(whenToRemove); - } } private record SnapshotKey(JObjectKey key, long version) implements Comparable { @@ -66,12 +43,12 @@ 
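// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// The downstream mapper added in PATCH 084 hides any persisted value newer
// than the snapshot by rewriting it into a tombstone, so the merging layer
// treats it as absent. A standalone rendering of that rule (demo names
// invented; requires Java 21 pattern matching over sealed types):
class TombstoneMapDemo {
    sealed interface Entry permits Data, Tombstone {}
    record Data(String value, long version) implements Entry {}
    record Tombstone() implements Entry {}

    static Entry visibleAt(Entry e, long snapshotId) {
        return switch (e) {
            case Tombstone t -> t;
            case Data d -> d.version() <= snapshotId ? d : new Tombstone();
        };
    }

    public static void main(String[] args) {
        System.out.println(visibleAt(new Data("x", 7), 5)); // Tombstone[]
        System.out.println(visibleAt(new Data("x", 3), 5)); // Data[value=x, version=3]
    }
}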
public class SnapshotManager { private long _lastSnapshotId = 0; private long _lastAliveSnapshotId = -1; + private final AtomicLong _snapshotVersion = new AtomicLong(0); private final Queue _snapshotIds = new ArrayDeque<>(); private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); private final TreeMap> _snapshotBounds = new TreeMap<>(); private final HashMap _snapshotRefCounts = new HashMap<>(); - private final ConcurrentSkipListMap _snapshotVersions = new ConcurrentSkipListMap<>(); private void verify() { assert _snapshotIds.isEmpty() == (_lastAliveSnapshotId == -1); @@ -83,7 +60,6 @@ public class SnapshotManager { assert id > _lastSnapshotId; if (!_snapshotIds.isEmpty()) { verify(); - boolean hadBackward = false; for (var action : writes) { var current = delegateStore.readObjectVerbose(action.key()); // Add to snapshot the previous visible version of the replaced object @@ -93,9 +69,8 @@ public class SnapshotManager { case WritebackObjectPersistentStore.VerboseReadResultPersisted( Optional data ) -> { - hadBackward = true; yield Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), - data.map(o -> new SnapshotEntryRead(o, id)).orElse(new SnapshotEntryReadEmpty(id))); + data.map(o -> new SnapshotEntryObject(o, id)).orElse(new SnapshotEntryDeleted(id))); } case WritebackObjectPersistentStore.VerboseReadResultPending( TxWriteback.PendingWriteEntry pending @@ -112,7 +87,7 @@ public class SnapshotManager { default -> throw new IllegalStateException("Unexpected value: " + current); }; - if (newSnapshotEntry.getValue() instanceof SnapshotEntryRead re) { + if (newSnapshotEntry.getValue() instanceof SnapshotEntryObject re) { assert re.data().version() <= newSnapshotEntry.getKey().version(); } if (newSnapshotEntry.getValue() instanceof SnapshotEntryObject re) { @@ -130,10 +105,7 @@ public class SnapshotManager { }); } -// if (hadBackward) - for (var sid : _snapshotIds) { - _snapshotVersions.merge(sid, 1L, Long::sum); - } + _snapshotVersion.incrementAndGet(); } verify(); @@ -156,7 +128,6 @@ public class SnapshotManager { do { Log.tracev("Removing snapshot {0}", curId); _snapshotIds.poll(); - _snapshotVersions.remove(curId); nextId = _snapshotIds.isEmpty() ? 
-1 : _snapshotIds.peek(); while (nextId == curId) { _snapshotIds.poll(); @@ -240,7 +211,6 @@ public class SnapshotManager { _lastAliveSnapshotId = id; if (_snapshotRefCounts.merge(id, 1L, Long::sum) == 1) { _snapshotIds.add(id); - _snapshotVersions.put(id, 0L); } verify(); } @@ -275,10 +245,6 @@ public class SnapshotManager { _next = switch (next.getValue()) { case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); - case SnapshotEntryRead(JDataVersionedWrapper data, long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); - case SnapshotEntryReadEmpty(long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); @@ -345,7 +311,7 @@ public class SnapshotManager { public AutoRefreshingSnapshotKvIterator(IteratorStart start, JObjectKey key) { synchronized (SnapshotManager.this) { - long curVersion = _snapshotVersions.get(_id); + long curVersion = _snapshotVersion.get(); _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), new MappingKvIterator<>(delegateStore.getIterator(start, key), _downstreamTombstoneMapper)); _next = _backing.hasNext() ? _backing.next() : null; @@ -356,13 +322,13 @@ public class SnapshotManager { } private void doRefresh() { - long curVersion = _snapshotVersions.get(_id); + long curVersion = _snapshotVersion.get(); if (curVersion == _lastRefreshed) { return; } if (_next == null) return; synchronized (SnapshotManager.this) { - curVersion = _snapshotVersions.get(_id); + curVersion = _snapshotVersion.get(); Log.tracev("Refreshing snapshot iterator {0}, last refreshed {1}, current version {2}", _id, _lastRefreshed, curVersion); _backing.close(); _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), From 70db929051595f8e4fe9fe4baa7932bbdee493eb Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 10:26:47 +0100 Subject: [PATCH 086/105] add check in cache that we don't put stale info there --- .../com/usatiuk/dhfs/objects/SnapshotManager.java | 10 ++++++---- .../persistence/CachingObjectPersistentStore.java | 12 ++++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 48ca0487..98cd4b9d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -296,6 +296,9 @@ public class SnapshotManager { } + // In case something was added to the snapshot, it is not guaranteed that the iterators will see it, + // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will + // be served instead. 
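// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// The refresh protocol described in the comment above: remember which version
// counter the iterator was built against; when it moves, rebuild the backing
// iterator positioned at the pending element and check that the element is
// unchanged, since snapshot contents must never change under a reader.
// Skeleton only (demo names invented, single-threaded for brevity; the real
// rebuild repositions at the pending element's key):
import java.util.Iterator;
import java.util.function.LongSupplier;
import java.util.function.Supplier;

class RefreshDemo<T> {
    private final Supplier<Iterator<T>> rebuild;
    private final LongSupplier version;
    private Iterator<T> backing;
    private long builtAt;
    private T next;

    RefreshDemo(Supplier<Iterator<T>> rebuild, LongSupplier version) {
        this.rebuild = rebuild;
        this.version = version;
        this.backing = rebuild.get();
        this.builtAt = version.getAsLong();
        this.next = backing.hasNext() ? backing.next() : null;
    }

    void maybeRefresh() {
        if (version.getAsLong() == builtAt || next == null) return;
        backing = rebuild.get();
        T reread = backing.hasNext() ? backing.next() : null;
        assert next.equals(reread); // the snapshot view may not have changed
        builtAt = version.getAsLong();
    }
}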
public class AutoRefreshingSnapshotKvIterator implements CloseableKvIterator { private CloseableKvIterator _backing; private long _lastRefreshed = -1L; @@ -334,23 +337,22 @@ public class SnapshotManager { _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), new MappingKvIterator<>(delegateStore.getIterator(IteratorStart.GE, _next.getKey()), _downstreamTombstoneMapper)); var next = _backing.hasNext() ? _backing.next() : null; - boolean fail = false; if (next == null) { Log.errorv("Failed to refresh snapshot iterator, null {0}, last refreshed {1}," + " current version {2}, current value {3}", _id, _lastRefreshed, curVersion, next); - fail = true; + assert false; } else if (!next.equals(_next)) { Log.errorv("Failed to refresh snapshot iterator, mismatch {0}, last refreshed {1}," + " current version {2}, current value {3}, read value {4}", _id, _lastRefreshed, curVersion, _next, next); - fail = true; + assert false; } - assert !fail; _next = next; _lastRefreshed = curVersion; } } + // _next should always be valid, so it's ok to do the refresh "lazily" private void prepareNext() { doRefresh(); if (_backing.hasNext()) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 4126f743..34f900f6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -11,6 +11,7 @@ import org.eclipse.microprofile.config.inject.ConfigProperty; import javax.annotation.Nonnull; import java.util.Collection; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Optional; import java.util.concurrent.ConcurrentSkipListMap; @@ -22,6 +23,7 @@ import java.util.stream.Stream; public class CachingObjectPersistentStore { private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); private final ConcurrentSkipListMap _sortedCache = new ConcurrentSkipListMap<>(); + private final HashSet _pendingWrites = new HashSet<>(); private final DataLocker _locker = new DataLocker(); @Inject SerializingObjectPersistentStore delegate; @@ -61,6 +63,7 @@ public class CachingObjectPersistentStore { private void put(JObjectKey key, Optional obj) { // Log.tracev("Adding {0} to cache: {1}", key, obj); synchronized (_cache) { + assert !_pendingWrites.contains(key); int size = obj.map(o -> o.data().estimateSize()).orElse(0); _curSize += size; @@ -105,9 +108,18 @@ public class CachingObjectPersistentStore { _cache.remove(key); _sortedCache.remove(key); // Log.tracev("Removing {0} from cache", key); + var added = _pendingWrites.add(key); + assert added; } } delegate.commitTx(names); + synchronized (_cache) { + for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), + names.deleted().stream()).toList()) { + var removed = _pendingWrites.remove(key); + assert removed; + } + } } From 922bdf226c8a45a4d1fd45ca5d786e1d5474492d Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 10:39:28 +0100 Subject: [PATCH 087/105] separate SelfRefreshingKvIterator --- .../objects/SelfRefreshingKvIterator.java | 100 ++++++++++++++++++ .../usatiuk/dhfs/objects/SnapshotManager.java | 89 +++------------- 2 files changed, 115 insertions(+), 74 deletions(-) create mode 100644 
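// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// The guard PATCH 086 adds to the caching store: while a key sits in the
// writeback set, nothing may repopulate the read cache for it, or a stale
// value could mask the pending write. The bookkeeping, boiled down to a
// JDK-only shape (demo names invented):
import java.util.HashMap;
import java.util.HashSet;

class PendingWriteGuardDemo {
    private final HashMap<String, String> cache = new HashMap<>();
    private final HashSet<String> pendingWrites = new HashSet<>();

    synchronized void cachePut(String key, String value) {
        assert !pendingWrites.contains(key); // never cache over an in-flight write
        cache.put(key, value);
    }

    synchronized void beginCommit(String key) {
        cache.remove(key);      // drop the stale entry
        pendingWrites.add(key); // block re-caching until the commit lands
    }

    synchronized void endCommit(String key) {
        boolean removed = pendingWrites.remove(key);
        assert removed;
    }
}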
dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java new file mode 100644 index 00000000..400ce735 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java @@ -0,0 +1,100 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Supplier; + +public class SelfRefreshingKvIterator, V> implements CloseableKvIterator { + private CloseableKvIterator _backing; + private long _lastRefreshed = -1L; + private Pair _next; + private final Object _synchronizer; + private final Supplier> _iteratorSupplier; + private final Supplier _versionSupplier; + + public SelfRefreshingKvIterator(Supplier> iteratorSupplier, Supplier versionSupplier, Object synchronizer) { + _iteratorSupplier = iteratorSupplier; + _versionSupplier = versionSupplier; + _synchronizer = synchronizer; + + synchronized (_synchronizer) { + long curVersion = _versionSupplier.get(); + _backing = _iteratorSupplier.get(); + _next = _backing.hasNext() ? _backing.next() : null; +// if (_next != null) +// assert _next.getValue().version() <= _id; + _lastRefreshed = curVersion; + } + } + + private void doRefresh() { + long curVersion = _versionSupplier.get(); + if (curVersion == _lastRefreshed) { + return; + } + if (_next == null) return; + synchronized (_synchronizer) { + curVersion = _versionSupplier.get(); + Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _lastRefreshed, curVersion); + _backing.close(); + _backing = _iteratorSupplier.get(); + var next = _backing.hasNext() ? 
_backing.next() : null; + if (next == null) { + Log.errorv("Failed to refresh iterator, null last refreshed {0}," + + " current version {1}, current value {2}", _lastRefreshed, curVersion, next); + assert false; + } else if (!next.equals(_next)) { + Log.errorv("Failed to refresh iterator, mismatch last refreshed {0}," + + " current version {1}, current value {2}, read value {3}", _lastRefreshed, curVersion, _next, next); + assert false; + } + + _next = next; + _lastRefreshed = curVersion; + } + } + + // _next should always be valid, so it's ok to do the refresh "lazily" + private void prepareNext() { + doRefresh(); + if (_backing.hasNext()) { + _next = _backing.next(); +// assert _next.getValue().version() <= _id; + } else { + _next = null; + } + } + + @Override + public K peekNextKey() { + if (_next == null) { + throw new NoSuchElementException(); + } + return _next.getKey(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair next() { + if (_next == null) { + throw new NoSuchElementException("No more elements"); + } + var ret = _next; +// assert ret.getValue().version() <= _id; + prepareNext(); + Log.tracev("Read: {0}, next: {1}", ret, _next); + return ret; + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 98cd4b9d..d0ef2b65 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -15,7 +15,6 @@ import java.util.*; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; -import java.util.function.Function; @ApplicationScoped public class SnapshotManager { @@ -299,76 +298,16 @@ public class SnapshotManager { // In case something was added to the snapshot, it is not guaranteed that the iterators will see it, // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will // be served instead. - public class AutoRefreshingSnapshotKvIterator implements CloseableKvIterator { - private CloseableKvIterator _backing; - private long _lastRefreshed = -1L; - private Pair _next; + public class CheckingSnapshotKvIterator implements CloseableKvIterator { + private final CloseableKvIterator _backing; - private final Function, TombstoneMergingKvIterator.DataType> _downstreamTombstoneMapper - = d -> switch (d) { - case TombstoneMergingKvIterator.Tombstone() -> d; - case TombstoneMergingKvIterator.Data data -> - data.value().version() <= _id ? data : new TombstoneMergingKvIterator.Tombstone<>(); - default -> throw new IllegalStateException("Unexpected value: " + d); - }; - - public AutoRefreshingSnapshotKvIterator(IteratorStart start, JObjectKey key) { - synchronized (SnapshotManager.this) { - long curVersion = _snapshotVersion.get(); - _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), - new MappingKvIterator<>(delegateStore.getIterator(start, key), _downstreamTombstoneMapper)); - _next = _backing.hasNext() ? 
_backing.next() : null; - if (_next != null) - assert _next.getValue().version() <= _id; - _lastRefreshed = curVersion; - } - } - - private void doRefresh() { - long curVersion = _snapshotVersion.get(); - if (curVersion == _lastRefreshed) { - return; - } - if (_next == null) return; - synchronized (SnapshotManager.this) { - curVersion = _snapshotVersion.get(); - Log.tracev("Refreshing snapshot iterator {0}, last refreshed {1}, current version {2}", _id, _lastRefreshed, curVersion); - _backing.close(); - _backing = new TombstoneMergingKvIterator<>(new SnapshotKvIterator(IteratorStart.GE, _next.getKey()), - new MappingKvIterator<>(delegateStore.getIterator(IteratorStart.GE, _next.getKey()), _downstreamTombstoneMapper)); - var next = _backing.hasNext() ? _backing.next() : null; - if (next == null) { - Log.errorv("Failed to refresh snapshot iterator, null {0}, last refreshed {1}," + - " current version {2}, current value {3}", _id, _lastRefreshed, curVersion, next); - assert false; - } else if (!next.equals(_next)) { - Log.errorv("Failed to refresh snapshot iterator, mismatch {0}, last refreshed {1}," + - " current version {2}, current value {3}, read value {4}", _id, _lastRefreshed, curVersion, _next, next); - assert false; - } - - _next = next; - _lastRefreshed = curVersion; - } - } - - // _next should always be valid, so it's ok to do the refresh "lazily" - private void prepareNext() { - doRefresh(); - if (_backing.hasNext()) { - _next = _backing.next(); - assert _next.getValue().version() <= _id; - } else { - _next = null; - } + public CheckingSnapshotKvIterator(CloseableKvIterator backing) { + _backing = backing; } @Override public JObjectKey peekNextKey() { - if (_next == null) { - throw new NoSuchElementException(); - } - return _next.getKey(); + return _backing.peekNextKey(); } @Override @@ -378,24 +317,26 @@ public class SnapshotManager { @Override public boolean hasNext() { - return _next != null; + return _backing.hasNext(); } @Override public Pair next() { - if (_next == null) { - throw new NoSuchElementException("No more elements"); - } - var ret = _next; + var ret = _backing.next(); assert ret.getValue().version() <= _id; - prepareNext(); - Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; } } public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new AutoRefreshingSnapshotKvIterator(start, key); + return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>(() -> + new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), + new MappingKvIterator<>(delegateStore.getIterator(start, key), d -> switch (d) { + case TombstoneMergingKvIterator.Tombstone() -> d; + case TombstoneMergingKvIterator.Data data -> + data.value().version() <= _id ? 
data : new TombstoneMergingKvIterator.Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + d); + })), _snapshotVersion::get, SnapshotManager.this)); } public CloseableKvIterator getIterator(JObjectKey key) { From 6924c70cd4fe965c05daad194b7b2c8765de1433 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 12:12:47 +0100 Subject: [PATCH 088/105] more iterator fixes --- .../objects/InvalidIteratorException.java | 11 +++ .../dhfs/objects/InvalidatableKvIterator.java | 66 ++++++++++++++++ .../objects/SelfRefreshingKvIterator.java | 78 +++++++++++-------- .../usatiuk/dhfs/objects/SnapshotManager.java | 43 ++++++---- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 59 +++++++++----- .../WritebackObjectPersistentStore.java | 16 +++- .../CachingObjectPersistentStore.java | 1 + 7 files changed, 205 insertions(+), 69 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java new file mode 100644 index 00000000..fa2bb988 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +public class InvalidIteratorException extends RuntimeException { + public InvalidIteratorException() { + super(); + } + + public InvalidIteratorException(String message) { + super(message); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java new file mode 100644 index 00000000..712499c3 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java @@ -0,0 +1,66 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.concurrent.locks.Lock; +import java.util.function.Supplier; + +public class InvalidatableKvIterator, V> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + private final Supplier _versionSupplier; + private final long _version; + private final Lock _lock; + + public InvalidatableKvIterator(CloseableKvIterator backing, Supplier versionSupplier, Lock lock) { + _backing = backing; + _versionSupplier = versionSupplier; + _lock = lock; + _version = _versionSupplier.get(); + } + + private void checkVersion() { + if (_versionSupplier.get() != _version) { + Log.errorv("Version mismatch: {0} != {1}", _versionSupplier.get(), _version); + throw new InvalidIteratorException(); + } + } + + @Override + public K peekNextKey() { + _lock.lock(); + try { + checkVersion(); + return _backing.peekNextKey(); + } finally { + _lock.unlock(); + } + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + _lock.lock(); + try { + checkVersion(); + return _backing.hasNext(); + } finally { + _lock.unlock(); + } + } + + @Override + public Pair next() { + _lock.lock(); + try { + checkVersion(); + return _backing.next(); + } finally { + _lock.unlock(); + } + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java 
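// --- Editor's note: hypothetical sketch, not part of the patch series. ---
// InvalidatableKvIterator, added above, is the fail-fast cousin of the
// self-refreshing iterator: instead of rebuilding on a version change it
// refuses to continue, much like ConcurrentModificationException. A minimal
// shape (demo names invented; the patch throws its own InvalidIteratorException,
// here replaced with IllegalStateException to keep the sketch self-contained):
import java.util.Iterator;
import java.util.function.LongSupplier;

class FailFastDemo<T> implements Iterator<T> {
    private final Iterator<T> backing;
    private final LongSupplier version;
    private final long builtAt;

    FailFastDemo(Iterator<T> backing, LongSupplier version) {
        this.backing = backing;
        this.version = version;
        this.builtAt = version.getAsLong();
    }

    private void check() {
        if (version.getAsLong() != builtAt)
            throw new IllegalStateException("iterator invalidated by a concurrent commit");
    }

    @Override public boolean hasNext() { check(); return backing.hasNext(); }
    @Override public T next() { check(); return backing.next(); }
}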
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java index 400ce735..4b4f6df2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java @@ -1,69 +1,84 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; import java.util.NoSuchElementException; +import java.util.concurrent.locks.Lock; +import java.util.function.Function; import java.util.function.Supplier; +// Also checks that the next provided item is always consistent after a refresh public class SelfRefreshingKvIterator, V> implements CloseableKvIterator { private CloseableKvIterator _backing; - private long _lastRefreshed = -1L; - private Pair _next; - private final Object _synchronizer; - private final Supplier> _iteratorSupplier; + private long _curVersion = -1L; + private final Lock _lock; + private final Function, CloseableKvIterator> _iteratorSupplier; private final Supplier _versionSupplier; + private Pair _next; - public SelfRefreshingKvIterator(Supplier> iteratorSupplier, Supplier versionSupplier, Object synchronizer) { + public SelfRefreshingKvIterator(Function, CloseableKvIterator> iteratorSupplier, Supplier versionSupplier, Lock lock, + IteratorStart start, K key) { _iteratorSupplier = iteratorSupplier; _versionSupplier = versionSupplier; - _synchronizer = synchronizer; + _lock = lock; - synchronized (_synchronizer) { + _lock.lock(); + try { long curVersion = _versionSupplier.get(); - _backing = _iteratorSupplier.get(); + _backing = _iteratorSupplier.apply(Pair.of(start, key)); _next = _backing.hasNext() ? _backing.next() : null; -// if (_next != null) -// assert _next.getValue().version() <= _id; - _lastRefreshed = curVersion; + _curVersion = curVersion; + } finally { + _lock.unlock(); } } - private void doRefresh() { - long curVersion = _versionSupplier.get(); - if (curVersion == _lastRefreshed) { - return; - } - if (_next == null) return; - synchronized (_synchronizer) { - curVersion = _versionSupplier.get(); - Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _lastRefreshed, curVersion); - _backing.close(); - _backing = _iteratorSupplier.get(); + private void maybeRefresh() { + _lock.lock(); + CloseableKvIterator oldBacking = null; + try { + if (_versionSupplier.get() == _curVersion) { + return; + } + long newVersion = _versionSupplier.get(); + Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _curVersion, newVersion); + oldBacking = _backing; + _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _next.getKey())); var next = _backing.hasNext() ? 
_backing.next() : null; if (next == null) { Log.errorv("Failed to refresh iterator, null last refreshed {0}," + - " current version {1}, current value {2}", _lastRefreshed, curVersion, next); + " current version {1}, current value {2}", _curVersion, newVersion, next); assert false; } else if (!next.equals(_next)) { Log.errorv("Failed to refresh iterator, mismatch last refreshed {0}," + - " current version {1}, current value {2}, read value {3}", _lastRefreshed, curVersion, _next, next); + " current version {1}, current value {2}, read value {3}", _curVersion, newVersion, _next, next); assert false; } _next = next; - _lastRefreshed = curVersion; + _curVersion = newVersion; + } finally { + _lock.unlock(); + if (oldBacking != null) { + oldBacking.close(); + } } } // _next should always be valid, so it's ok to do the refresh "lazily" private void prepareNext() { - doRefresh(); - if (_backing.hasNext()) { - _next = _backing.next(); -// assert _next.getValue().version() <= _id; - } else { - _next = null; + _lock.lock(); + try { + maybeRefresh(); + if (_backing.hasNext()) { + _next = _backing.next(); + } else { + _next = null; + } + } finally { + _lock.unlock(); } } @@ -91,7 +106,6 @@ public class SelfRefreshingKvIterator, V> implements Clo throw new NoSuchElementException("No more elements"); } var ret = _next; -// assert ret.getValue().version() <= _id; prepareNext(); Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index d0ef2b65..16bd6146 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -14,6 +14,7 @@ import java.lang.ref.Cleaner; import java.util.*; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; @ApplicationScoped @@ -21,6 +22,8 @@ public class SnapshotManager { @Inject WritebackObjectPersistentStore delegateStore; + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + private interface SnapshotEntry { long whenToRemove(); } @@ -55,7 +58,8 @@ public class SnapshotManager { } Consumer commitTx(Collection> writes, long id) { - synchronized (this) { + _lock.writeLock().lock(); + try { assert id > _lastSnapshotId; if (!_snapshotIds.isEmpty()) { verify(); @@ -67,10 +71,9 @@ public class SnapshotManager { Pair newSnapshotEntry = switch (current) { case WritebackObjectPersistentStore.VerboseReadResultPersisted( Optional data - ) -> { - yield Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), - data.map(o -> new SnapshotEntryObject(o, id)).orElse(new SnapshotEntryDeleted(id))); - } + ) -> + Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), + data.map(o -> new SnapshotEntryObject(o, id)).orElse(new SnapshotEntryDeleted(id))); case WritebackObjectPersistentStore.VerboseReadResultPending( TxWriteback.PendingWriteEntry pending ) -> { @@ -108,13 +111,19 @@ public class SnapshotManager { } verify(); + // Commit under lock, iterators will see new version after the lock is released and writeback + // cache is updated + // TODO: Maybe writeback iterator being invalidated wouldn't be a 
problem? return delegateStore.commitTx(writes, id); + } finally { + _lock.writeLock().unlock(); } } private void unrefSnapshot(long id) { Log.tracev("Unref snapshot {0}", id); - synchronized (this) { + _lock.writeLock().lock(); + try { verify(); var refCount = _snapshotRefCounts.merge(id, -1L, (a, b) -> a + b == 0 ? null : a + b); if (!(refCount == null && id == _lastAliveSnapshotId)) { @@ -176,6 +185,8 @@ public class SnapshotManager { curCount = _snapshotRefCounts.getOrDefault(curId, 0L); } while (curCount == 0); verify(); + } finally { + _lock.writeLock().unlock(); } } @@ -201,7 +212,8 @@ public class SnapshotManager { private Snapshot(long id) { _id = id; - synchronized (SnapshotManager.this) { + _lock.writeLock().lock(); + try { verify(); if (_lastSnapshotId > id) throw new IllegalSnapshotIdException("Snapshot id " + id + " is less than last snapshot id " + _lastSnapshotId); @@ -212,6 +224,8 @@ public class SnapshotManager { _snapshotIds.add(id); } verify(); + } finally { + _lock.writeLock().unlock(); } var closedRef = _closed; var idRef = _id; @@ -295,9 +309,6 @@ public class SnapshotManager { } - // In case something was added to the snapshot, it is not guaranteed that the iterators will see it, - // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will - // be served instead. public class CheckingSnapshotKvIterator implements CloseableKvIterator { private final CloseableKvIterator _backing; @@ -329,14 +340,18 @@ public class SnapshotManager { } public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>(() -> - new TombstoneMergingKvIterator<>(new SnapshotKvIterator(start, key), - new MappingKvIterator<>(delegateStore.getIterator(start, key), d -> switch (d) { + // In case something was added to the snapshot, it is not guaranteed that the iterators will see it, + // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will + // be served instead. Note that refreshing the iterator will also refresh the writeback iterator, + // so it also should be consistent. + return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>((params) -> + new TombstoneMergingKvIterator<>(new SnapshotKvIterator(params.getLeft(), params.getRight()), + new MappingKvIterator<>(delegateStore.getIterator(params.getLeft(), params.getRight()), d -> switch (d) { case TombstoneMergingKvIterator.Tombstone() -> d; case TombstoneMergingKvIterator.Data data -> data.value().version() <= _id ? 
data : new TombstoneMergingKvIterator.Tombstone<>(); default -> throw new IllegalStateException("Unexpected value: " + d); - })), _snapshotVersion::get, SnapshotManager.this)); + })), _snapshotVersion::get, _lock.readLock(), start, key)); } public CloseableKvIterator getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java index 3fd67c60..1926ca95 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java @@ -20,12 +20,15 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; @ApplicationScoped public class TxWritebackImpl implements TxWriteback { private final LinkedList _pendingBundles = new LinkedList<>(); + private final ReentrantReadWriteLock _pendingBundlesVersionLock = new ReentrantReadWriteLock(); private final ConcurrentSkipListMap _pendingWrites = new ConcurrentSkipListMap<>(); + private final AtomicLong _pendingWritesVersion = new AtomicLong(); private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); private final Object _flushWaitSynchronizer = new Object(); @@ -128,12 +131,14 @@ public class TxWritebackImpl implements TxWriteback { Log.trace("Bundle " + bundle.getId() + " committed"); + // Remove from pending writes, after real commit synchronized (_pendingBundles) { bundle._entries.values().forEach(e -> { var cur = _pendingWrites.get(e.key()); if (cur.bundleId() <= bundle.getId()) _pendingWrites.remove(e.key(), cur); }); + // No need to increment version } List> callbacks = new ArrayList<>(); @@ -219,22 +224,28 @@ public class TxWritebackImpl implements TxWriteback { @Override public void commitBundle(TxBundle bundle) { verifyReady(); - synchronized (_pendingBundles) { - ((TxBundleImpl) bundle).setReady(); - ((TxBundleImpl) bundle)._entries.values().forEach(e -> { - switch (e) { - case TxBundleImpl.CommittedEntry c -> - _pendingWrites.put(c.key(), new PendingWrite(c.data, bundle.getId())); - case TxBundleImpl.DeletedEntry d -> - _pendingWrites.put(d.key(), new PendingDelete(d.key, bundle.getId())); - default -> throw new IllegalStateException("Unexpected value: " + e); + _pendingBundlesVersionLock.writeLock().lock(); + try { + synchronized (_pendingBundles) { + ((TxBundleImpl) bundle).setReady(); + ((TxBundleImpl) bundle)._entries.values().forEach(e -> { + switch (e) { + case TxBundleImpl.CommittedEntry c -> + _pendingWrites.put(c.key(), new PendingWrite(c.data, bundle.getId())); + case TxBundleImpl.DeletedEntry d -> + _pendingWrites.put(d.key(), new PendingDelete(d.key, bundle.getId())); + default -> throw new IllegalStateException("Unexpected value: " + e); + } + }); + _pendingWritesVersion.incrementAndGet(); + if (_pendingBundles.peek() == bundle) + _pendingBundles.notify(); + synchronized (_flushWaitSynchronizer) { + currentSize += ((TxBundleImpl) bundle).calculateTotalSize(); } - }); - if (_pendingBundles.peek() == bundle) - _pendingBundles.notify(); - synchronized (_flushWaitSynchronizer) { - currentSize += ((TxBundleImpl) bundle).calculateTotalSize(); } + } finally { + _pendingBundlesVersionLock.writeLock().unlock(); } } @@ -378,14 +389,20 @@ public class TxWritebackImpl implements TxWriteback { // Returns an 
iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers + // Invalidated by commitBundle, but might return data after it has been really committed @Override public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { - return new MappingKvIterator<>( - new NavigableMapKvIterator<>(_pendingWrites, start, key), - e -> switch (e) { - case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); - case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); - default -> throw new IllegalStateException("Unexpected value: " + e); - }); + _pendingBundlesVersionLock.readLock().lock(); + try { + return new InvalidatableKvIterator<>(new MappingKvIterator<>( + new NavigableMapKvIterator<>(_pendingWrites, start, key), + e -> switch (e) { + case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); + case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + e); + }), _pendingWritesVersion::get, _pendingBundlesVersionLock.readLock()); + } finally { + _pendingBundlesVersionLock.readLock().unlock(); + } } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index a373750b..4adc4489 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -11,6 +11,8 @@ import javax.annotation.Nonnull; import java.util.Collection; import java.util.HashSet; import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; @ApplicationScoped @@ -19,6 +21,8 @@ public class WritebackObjectPersistentStore { CachingObjectPersistentStore delegate; @Inject TxWriteback txWriteback; + private final AtomicLong _commitCounter = new AtomicLong(0); + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); @Nonnull public Collection findAllObjects() { @@ -88,6 +92,7 @@ public class WritebackObjectPersistentStore { Log.tracef("Committing transaction %d to storage", id); txWriteback.commitBundle(bundle); + _commitCounter.incrementAndGet(); long bundleId = bundle.getId(); @@ -96,9 +101,16 @@ public class WritebackObjectPersistentStore { // Returns an iterator with a view of all commited objects // Does not have to guarantee consistent view, snapshots are handled by upper layers + // Should be refreshed after each commit public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { - return new MergingKvIterator<>(txWriteback.getIterator(start, key), - new MappingKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)); + _lock.readLock().lock(); + try { + return new InvalidatableKvIterator<>(new MergingKvIterator<>(txWriteback.getIterator(start, key), + new MappingKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)), + _commitCounter::get, _lock.readLock()); + } finally { + _lock.readLock().unlock(); + } } public CloseableKvIterator> getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 34f900f6..807887c2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -113,6 +113,7 @@ public class CachingObjectPersistentStore { } } delegate.commitTx(names); + // Now, reading from the backing store should return the new data synchronized (_cache) { for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), names.deleted().stream()).toList()) { From 9b2dbe01f1c049839356e05364addcb8083e1527 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 12:38:37 +0100 Subject: [PATCH 089/105] safer cache iterator --- .../InconsistentSelfRefreshingKvIterator.java | 125 ++++++++++++++++++ .../dhfs/objects/StaleIteratorException.java | 11 ++ .../CachingObjectPersistentStore.java | 60 +++++---- 3 files changed, 173 insertions(+), 23 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java new file mode 100644 index 00000000..858f2754 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java @@ -0,0 +1,125 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.concurrent.locks.Lock; +import java.util.function.Function; +import java.util.function.Supplier; + +// Also checks that the next provided item is always consistent after a refresh +public class InconsistentSelfRefreshingKvIterator, V> implements CloseableKvIterator { + private CloseableKvIterator _backing; + private long _curVersion = -1L; + private final Lock _lock; + private final Function, CloseableKvIterator> _iteratorSupplier; + private final Supplier _versionSupplier; + private K _lastReturnedKey = null; + private K _peekedKey = null; + private boolean _peekedNext = false; + private final Pair _initialStart; + + public InconsistentSelfRefreshingKvIterator(Function, CloseableKvIterator> iteratorSupplier, Supplier versionSupplier, Lock lock, + IteratorStart start, K key) { + _iteratorSupplier = iteratorSupplier; + _versionSupplier = versionSupplier; + _lock = lock; + _initialStart = Pair.of(start, key); + + _lock.lock(); + try { + long curVersion = _versionSupplier.get(); + _backing = _iteratorSupplier.apply(Pair.of(start, key)); + _curVersion = curVersion; + } finally { + _lock.unlock(); + } + } + + private void maybeRefresh() { + _lock.lock(); + CloseableKvIterator oldBacking = null; + try { + if (_versionSupplier.get() == _curVersion) { + return; + } + long newVersion = _versionSupplier.get(); + Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _curVersion, newVersion); + oldBacking = _backing; + if (_peekedKey != null) { + _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); + if (!_backing.hasNext() || !_backing.peekNextKey().equals(_peekedKey)) { + throw new 
StaleIteratorException(); + } + } else if (_lastReturnedKey != null) { + _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey)); + } else { + _backing = _iteratorSupplier.apply(_initialStart); + } + + if (_peekedNext && !_backing.hasNext()) { + throw new StaleIteratorException(); + } + + _curVersion = newVersion; + } finally { + _lock.unlock(); + if (oldBacking != null) { + oldBacking.close(); + } + } + } + + @Override + public K peekNextKey() { + if (_peekedKey != null) { + return _peekedKey; + } + _lock.lock(); + try { + maybeRefresh(); + _peekedKey = _backing.peekNextKey(); + _peekedNext = true; + return _peekedKey; + } finally { + _lock.unlock(); + } + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + if (_peekedNext) { + return true; + } + _lock.lock(); + try { + maybeRefresh(); + _peekedNext = _backing.hasNext(); + return _peekedNext; + } finally { + _lock.unlock(); + } + } + + @Override + public Pair next() { + _lock.lock(); + try { + maybeRefresh(); + var got = _backing.next(); + _peekedNext = false; + _peekedKey = null; + _lastReturnedKey = got.getKey(); + return got; + } finally { + _lock.unlock(); + } + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java new file mode 100644 index 00000000..249f1c2f --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects; + +public class StaleIteratorException extends RuntimeException { + public StaleIteratorException() { + super(); + } + + public StaleIteratorException(String message) { + super(message); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 807887c2..77b89fa7 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -17,12 +17,18 @@ import java.util.Optional; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Stream; @ApplicationScoped public class CachingObjectPersistentStore { private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); private final ConcurrentSkipListMap _sortedCache = new ConcurrentSkipListMap<>(); + + private final AtomicLong _cacheVersion = new AtomicLong(0); + private final ReentrantReadWriteLock _cacheVersionLock = new ReentrantReadWriteLock(); + private final HashSet _pendingWrites = new HashSet<>(); private final DataLocker _locker = new DataLocker(); @Inject @@ -99,27 +105,33 @@ public class CachingObjectPersistentStore { } public void commitTx(TxManifestObj names) { - // During commit, readObject shouldn't be called for these items, - // it should be handled by the upstream store - synchronized (_cache) { - for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), - names.deleted().stream()).toList()) { - _curSize -= 
Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); - _cache.remove(key); - _sortedCache.remove(key); + _cacheVersionLock.writeLock().lock(); + try { + // During commit, readObject shouldn't be called for these items, + // it should be handled by the upstream store + synchronized (_cache) { + for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), + names.deleted().stream()).toList()) { + _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); + _cache.remove(key); + _sortedCache.remove(key); // Log.tracev("Removing {0} from cache", key); - var added = _pendingWrites.add(key); - assert added; + var added = _pendingWrites.add(key); + assert added; + } } - } - delegate.commitTx(names); - // Now, reading from the backing store should return the new data - synchronized (_cache) { - for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), - names.deleted().stream()).toList()) { - var removed = _pendingWrites.remove(key); - assert removed; + delegate.commitTx(names); + // Now, reading from the backing store should return the new data + synchronized (_cache) { + for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), + names.deleted().stream()).toList()) { + var removed = _pendingWrites.remove(key); + assert removed; + } } + _cacheVersion.incrementAndGet(); + } finally { + _cacheVersionLock.writeLock().unlock(); } } @@ -159,11 +171,13 @@ public class CachingObjectPersistentStore { // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, // if some objects are still in writeback public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new MergingKvIterator<>( - new PredicateKvIterator<>( - new NavigableMapKvIterator<>(_sortedCache, start, key), - e -> e.object().orElse(null) - ), new CachingKvIterator(delegate.getIterator(start, key))); + return new InconsistentSelfRefreshingKvIterator<>( + (bp) -> new MergingKvIterator<>( + new PredicateKvIterator<>( + new NavigableMapKvIterator<>(_sortedCache, bp.getLeft(), bp.getRight()), + e -> e.object().orElse(null) + ), new CachingKvIterator(delegate.getIterator(bp.getLeft(), bp.getRight()))), _cacheVersion::get, + _cacheVersionLock.readLock(), start, key); } public CloseableKvIterator getIterator(JObjectKey key) { From f1c1854e11f6ea89364d48e17e9da6851a66f98f Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 12:47:16 +0100 Subject: [PATCH 090/105] serialize outside _cacheVersionLock --- .../persistence/CachingObjectPersistentStore.java | 3 ++- .../SerializingObjectPersistentStore.java | 14 +++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 77b89fa7..0a5085b0 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -105,6 +105,7 @@ public class CachingObjectPersistentStore { } public void commitTx(TxManifestObj names) { + var serialized = delegate.prepareManifest(names); _cacheVersionLock.writeLock().lock(); try { // During commit, readObject shouldn't be called for these items, @@ -120,7 +121,7 @@ public class CachingObjectPersistentStore { assert added; 
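                // _pendingWrites tracks keys that are mid-commit: while a key is in
                // this set, readObject is expected to be answered by the upstream
                // store rather than by the cache. Note that prepareManifest() above
                // runs before the write lock is taken, so the CPU-heavy serialization
                // never blocks readers; the lock only covers cache bookkeeping and
                // the version bump that tells open iterators to refresh.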
} } - delegate.commitTx(names); + delegate.commitTx(serialized); // Now, reading from the backing store should return the new data synchronized (_cache) { for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index 6c339d03..4eef745b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -70,11 +70,19 @@ public class SerializingObjectPersistentStore { return getIterator(IteratorStart.GE, key); } - void commitTx(TxManifestObj names) { - delegateStore.commitTx(new TxManifestRaw( + public TxManifestRaw prepareManifest(TxManifestObj names) { + return new TxManifestRaw( names.written().stream() .map(e -> Pair.of(e.getKey(), serializer.serialize(e.getValue()))) .toList() - , names.deleted())); + , names.deleted()); + } + + void commitTx(TxManifestObj names) { + delegateStore.commitTx(prepareManifest(names)); + } + + void commitTx(TxManifestRaw names) { + delegateStore.commitTx(names); } } From 0597dce86ff1f3f076f52d61cb7781bc7f4e5b35 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 13:14:32 +0100 Subject: [PATCH 091/105] use bytebuffers --- .../com/usatiuk/dhfs/objects/JObjectKey.java | 16 +++++++ .../LmdbObjectPersistentStore.java | 42 +++++++++++-------- 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java index 26e5b347..b702069b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectKey.java @@ -1,6 +1,9 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; + import java.io.Serializable; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; public record JObjectKey(String name) implements Serializable, Comparable { @@ -22,7 +25,20 @@ public record JObjectKey(String name) implements Serializable, Comparable _env; - private Dbi _db; + private Env _env; + private Dbi _db; private boolean _ready = false; private static final String DB_NAME = "objects"; @@ -47,7 +48,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { Log.info("Initializing with root " + _root); _root.toFile().mkdirs(); } - _env = create(ByteArrayProxy.PROXY_BA) + _env = create() .setMapSize(1_000_000_000_000L) .setMaxDbs(1) .open(_root.toFile(), EnvFlags.MDB_NOTLS); @@ -80,20 +81,20 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { return List.of(); } + @Nonnull @Override public Optional readObject(JObjectKey name) { verifyReady(); - try (Txn txn = _env.txnRead()) { - var key = name.toString().getBytes(StandardCharsets.UTF_8); - var value = _db.get(txn, key); + try (Txn txn = _env.txnRead()) { + var value = _db.get(txn, name.toByteBuffer()); return Optional.ofNullable(value).map(ByteString::copyFrom); } } private class LmdbKvIterator implements CloseableKvIterator { - private final Txn _txn = _env.txnRead(); - private final Cursor _cursor = _db.openCursor(_txn); + private final Txn _txn = _env.txnRead(); 
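(Aside on the key encoding this commit switches to: lmdbjava compares keys bytewise and, with its default proxy, works on direct ByteBuffers, so JObjectKey gains a ByteBuffer form. A minimal self-contained sketch of such a conversion — the real code uses the project's UninitializedByteBuffer helper; the names below are illustrative only:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    class KeyEncoding {
        // Encode a string key into a direct buffer, ready for e.g. MDB_SET_RANGE.
        // flip() leaves position 0 / limit = length, which is what LMDB reads.
        static ByteBuffer toDirectKey(String name) {
            byte[] bytes = name.getBytes(StandardCharsets.UTF_8);
            ByteBuffer bb = ByteBuffer.allocateDirect(bytes.length);
            bb.put(bytes);
            bb.flip();
            return bb;
        }

        public static void main(String[] args) {
            ByteBuffer key = toDirectKey("objects/foo");
            System.out.println(key.remaining() + " bytes, direct=" + key.isDirect());
        }
    }

Note also the flip() calls after _cursor.key() reads below: decoding the key advances the shared buffer, so it is rewound before LMDB touches it again.)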
+ private final Cursor _cursor = _db.openCursor(_txn); private boolean _hasNext = false; private static final Cleaner CLEANER = Cleaner.create(); @@ -108,11 +109,12 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { }); verifyReady(); - if (!_cursor.get(key.toString().getBytes(StandardCharsets.UTF_8), GetOp.MDB_SET_RANGE)) { + if (!_cursor.get(key.toByteBuffer(), GetOp.MDB_SET_RANGE)) { return; } - var got = JObjectKey.fromBytes(_cursor.key()); + var got = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); var cmp = got.compareTo(key); assert cmp >= 0; @@ -163,7 +165,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { if (!_hasNext) { throw new NoSuchElementException("No more elements"); } - var ret = Pair.of(JObjectKey.fromBytes(_cursor.key()), ByteString.copyFrom(_cursor.val())); + var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val())); _hasNext = _cursor.next(); Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext); return ret; @@ -174,7 +176,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { if (!_hasNext) { throw new NoSuchElementException("No more elements"); } - return JObjectKey.fromBytes(_cursor.key()); + var ret = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); + return ret; } } @@ -186,14 +190,16 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { @Override public void commitTx(TxManifestRaw names) { verifyReady(); - try (Txn txn = _env.txnWrite()) { + try (Txn txn = _env.txnWrite()) { for (var written : names.written()) { - var key = written.getKey().toString().getBytes(StandardCharsets.UTF_8); - _db.put(txn, key, written.getValue().toByteArray()); + // TODO: + var bb = UninitializedByteBuffer.allocateUninitialized(written.getValue().size()); + bb.put(written.getValue().asReadOnlyByteBuffer()); + bb.flip(); + _db.put(txn, written.getKey().toByteBuffer(), bb); } for (JObjectKey key : names.deleted()) { - var keyBytes = key.toString().getBytes(StandardCharsets.UTF_8); - _db.delete(txn, keyBytes); + _db.delete(txn, key.toByteBuffer()); } txn.commit(); } From 98df76d0bcbd3f3abe496e5f4b7cb72527ca1561 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 14:15:58 +0100 Subject: [PATCH 092/105] writethrough caching --- .../CachingObjectPersistentStore.java | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 0a5085b0..1d8d400e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -111,13 +111,17 @@ public class CachingObjectPersistentStore { // During commit, readObject shouldn't be called for these items, // it should be handled by the upstream store synchronized (_cache) { - for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), - names.deleted().stream()).toList()) { - _curSize -= Optional.ofNullable(_cache.get(key)).map(CacheEntry::size).orElse(0L); - _cache.remove(key); - _sortedCache.remove(key); -// Log.tracev("Removing {0} from cache", key); - var added = _pendingWrites.add(key); + for (var write : names.written()) { + put(write.getLeft(), 
Optional.of(write.getRight())); + var added = _pendingWrites.add(write.getLeft()); + assert added; + } + for (var del : names.deleted()) { + // TODO: tombstone cache? + _curSize -= Optional.ofNullable(_cache.get(del)).map(CacheEntry::size).orElse(0L); + _cache.remove(del); + _sortedCache.remove(del); + var added = _pendingWrites.add(del); assert added; } } From e64f50dd3b5b1d38b838d859813d68f8d7099b9b Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 14:16:52 +0100 Subject: [PATCH 093/105] don't read wrong value in snapshot's readObject --- .../main/java/com/usatiuk/dhfs/objects/SnapshotManager.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 16bd6146..c34c7737 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -362,10 +362,10 @@ public class SnapshotManager { public Optional readObject(JObjectKey name) { try (var it = getIterator(name)) { if (it.hasNext()) { - var read = it.next(); - if (read.getKey().equals(name)) { - return Optional.of(read.getValue()); + if (!it.peekNextKey().equals(name)) { + return Optional.empty(); } + return Optional.of(it.next().getValue()); } } return Optional.empty(); From 716fb2151616990611a688e40adc9123af513ddf Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 14:18:35 +0100 Subject: [PATCH 094/105] add-opens for lmdb --- dhfs-parent/pom.xml | 1 + dhfs-parent/server/docker-compose.yml | 1 + .../src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java | 1 + 3 files changed, 3 insertions(+) diff --git a/dhfs-parent/pom.xml b/dhfs-parent/pom.xml index 6a4d4717..18e6a84f 100644 --- a/dhfs-parent/pom.xml +++ b/dhfs-parent/pom.xml @@ -121,6 +121,7 @@ --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED + --add-opens=java.base/java.nio=ALL-UNNAMED ${skip.unit} true diff --git a/dhfs-parent/server/docker-compose.yml b/dhfs-parent/server/docker-compose.yml index a6a0aefa..c45708ba 100644 --- a/dhfs-parent/server/docker-compose.yml +++ b/dhfs-parent/server/docker-compose.yml @@ -31,6 +31,7 @@ services: - ./target/quarkus-app:/app command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED + --add-opens=java.base/java.nio=ALL-UNNAMED -Ddhfs.objects.persistence.files.root=/dhfs_root/p -Ddhfs.objects.root=/dhfs_root/d -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0 diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java index 1d08d76e..9583493a 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java @@ -69,6 +69,7 @@ public class DhfsImage implements Future { .cmd("java", "-ea", "-Xmx128M", "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED", "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED", + "--add-opens=java.base/java.nio=ALL-UNNAMED", "-Ddhfs.objects.peerdiscovery.interval=1s", "-Ddhfs.objects.invalidation.delay=100", "-Ddhfs.objects.deletion.delay=0", From 05901f1acc8fafce91896f7923cda0a307c42f1f Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 
23 Feb 2025 16:34:06 +0100 Subject: [PATCH 095/105] even more functional iterators --- dhfs-parent/objects/pom.xml | 4 + .../InconsistentKvIteratorWrapper.java | 114 +++++ .../InconsistentSelfRefreshingKvIterator.java | 8 +- .../dhfs/objects/MappingKvIterator.java | 6 + .../dhfs/objects/MergingKvIterator.java | 24 +- .../dhfs/objects/NavigableMapKvIterator.java | 7 + .../usatiuk/dhfs/objects/PendingDelete.java | 4 + .../usatiuk/dhfs/objects/PendingWrite.java | 4 + .../dhfs/objects/PendingWriteEntry.java | 5 + .../dhfs/objects/PredicateKvIterator.java | 7 + .../objects/SelfRefreshingKvIterator.java | 3 +- .../usatiuk/dhfs/objects/SnapshotManager.java | 25 +- .../objects/TombstoneMergingKvIterator.java | 20 +- .../com/usatiuk/dhfs/objects/TxWriteback.java | 40 -- .../usatiuk/dhfs/objects/TxWritebackImpl.java | 408 ---------------- .../WritebackObjectPersistentStore.java | 434 ++++++++++++++++-- .../CachingObjectPersistentStore.java | 25 +- .../java/com/usatiuk/dhfs/objects/Just.java | 24 + .../dhfs/objects/MergingKvIteratorTest.java | 27 +- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 134 ++++++ .../objects/repository/peersync/PeerInfo.java | 7 +- 21 files changed, 807 insertions(+), 523 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java diff --git a/dhfs-parent/objects/pom.xml b/dhfs-parent/objects/pom.xml index d5dc8f9b..b11658fb 100644 --- a/dhfs-parent/objects/pom.xml +++ b/dhfs-parent/objects/pom.xml @@ -73,6 +73,10 @@ org.apache.commons commons-collections4 + + org.pcollections + pcollections + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java new file mode 100644 index 00000000..de5b6766 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java @@ -0,0 +1,114 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Function; + +// Also checks that the next provided item is always consistent after a refresh +public class InconsistentKvIteratorWrapper, V> implements CloseableKvIterator { + private CloseableKvIterator _backing; + private final Function, CloseableKvIterator> _iteratorSupplier; + private K _lastReturnedKey = null; + private K _peekedKey = null; + private boolean _peekedNext = false; + private final Pair _initialStart; + + public InconsistentKvIteratorWrapper(Function, CloseableKvIterator> iteratorSupplier, IteratorStart start, K key) { + _iteratorSupplier = iteratorSupplier; + _initialStart = Pair.of(start, key); + while (true) { + try { + _backing = _iteratorSupplier.apply(Pair.of(start, key)); + break; + } catch (StaleIteratorException ignored) { + continue; + } + } + } + + private void refresh() { + _backing.close(); + if 
(_peekedKey != null) { + _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); + if (!_backing.hasNext() || !_backing.peekNextKey().equals(_peekedKey)) { + assert false; + } + } else if (_lastReturnedKey != null) { + _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey)); + } else { + _backing = _iteratorSupplier.apply(_initialStart); + } + + if (_peekedNext && !_backing.hasNext()) { + assert false; + } + } + + @Override + public K peekNextKey() { + while (true) { + if (_peekedKey != null) { + return _peekedKey; + } + try { + _peekedKey = _backing.peekNextKey(); + assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; + } catch (NoSuchElementException ignored) { + assert !_peekedNext; + throw ignored; + } catch (StaleIteratorException ignored) { + refresh(); + continue; + } + _peekedNext = true; + Log.tracev("Peeked key: {0}", _peekedKey); + return _peekedKey; + } + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + while (true) { + if (_peekedNext) { + return true; + } + try { + _peekedNext = _backing.hasNext(); + Log.tracev("Peeked next: {0}", _peekedNext); + return _peekedNext; + } catch (StaleIteratorException ignored) { + refresh(); + continue; + } + } + } + + @Override + public Pair next() { + while (true) { + try { + var got = _backing.next(); + assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; + _peekedNext = false; + _peekedKey = null; + _lastReturnedKey = got.getKey(); + return got; + } catch (NoSuchElementException ignored) { + assert !_peekedNext; + throw ignored; + } catch (StaleIteratorException ignored) { + refresh(); + continue; + } + } + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java index 858f2754..41088677 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java @@ -45,7 +45,6 @@ public class InconsistentSelfRefreshingKvIterator, V> im return; } long newVersion = _versionSupplier.get(); - Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _curVersion, newVersion); oldBacking = _backing; if (_peekedKey != null) { _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); @@ -62,6 +61,9 @@ public class InconsistentSelfRefreshingKvIterator, V> im throw new StaleIteratorException(); } + Log.tracev("Refreshed iterator last refreshed {0}, current version {1}", + _curVersion, newVersion); + _curVersion = newVersion; } finally { _lock.unlock(); @@ -80,7 +82,9 @@ public class InconsistentSelfRefreshingKvIterator, V> im try { maybeRefresh(); _peekedKey = _backing.peekNextKey(); + assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; _peekedNext = true; + Log.tracev("Peeked key: {0}", _peekedKey); return _peekedKey; } finally { _lock.unlock(); @@ -101,6 +105,7 @@ public class InconsistentSelfRefreshingKvIterator, V> im try { maybeRefresh(); _peekedNext = _backing.hasNext(); + Log.tracev("Peeked next: {0}", _peekedNext); return _peekedNext; } finally { _lock.unlock(); @@ -113,6 +118,7 @@ public class InconsistentSelfRefreshingKvIterator, V> im try { maybeRefresh(); var got = _backing.next(); + assert _lastReturnedKey == null || 
got.getKey().compareTo(_lastReturnedKey) > 0; _peekedNext = false; _peekedKey = null; _lastReturnedKey = got.getKey(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java index d980d359..f131e4f9 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java @@ -34,4 +34,10 @@ public class MappingKvIterator, V, V_T> implements Close return Pair.of(got.getKey(), _transformer.apply(got.getValue())); } + @Override + public String toString() { + return "MappingKvIterator{" + + "_backing=" + _backing + + '}'; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java index 8a679da8..f1c8b1fe 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects; +import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; import java.util.*; @@ -7,8 +8,10 @@ import java.util.*; public class MergingKvIterator, V> implements CloseableKvIterator { private final Map, Integer> _iterators; private final SortedMap> _sortedIterators = new TreeMap<>(); + private final String _name; - public MergingKvIterator(List> iterators) { + public MergingKvIterator(String name, List> iterators) { + _name = name; int counter = 0; var iteratorsTmp = new HashMap, Integer>(); for (CloseableKvIterator iterator : iterators) { @@ -19,11 +22,13 @@ public class MergingKvIterator, V> implements CloseableK for (CloseableKvIterator iterator : iterators) { advanceIterator(iterator); } + + Log.tracev("{0} Created: {1}", _name, _sortedIterators); } @SafeVarargs - public MergingKvIterator(CloseableKvIterator... iterators) { - this(List.of(iterators)); + public MergingKvIterator(String name, CloseableKvIterator... 
iterators) { + this(name, List.of(iterators)); } private void advanceIterator(CloseableKvIterator iterator) { @@ -43,6 +48,9 @@ public class MergingKvIterator, V> implements CloseableK if (oursPrio < theirsPrio) { _sortedIterators.put(key, iterator); advanceIterator(them); + } else { + iterator.next(); + advanceIterator(iterator); } } @@ -73,6 +81,16 @@ public class MergingKvIterator, V> implements CloseableK } var curVal = cur.getValue().next(); advanceIterator(cur.getValue()); + Log.tracev("{0} Read: {1}, next: {2}", _name, curVal, _sortedIterators); return curVal; } + + @Override + public String toString() { + return "MergingKvIterator{" + + "_name='" + _name + '\'' + + ", _sortedIterators=" + _sortedIterators + + ", _iterators=" + _iterators + + '}'; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java index ac224347..1ced2e15 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java @@ -60,4 +60,11 @@ public class NavigableMapKvIterator, V> implements Close return Pair.of(ret); } + @Override + public String toString() { + return "NavigableMapKvIterator{" + + "_iterator=" + _iterator + + ", _next=" + _next + + '}'; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java new file mode 100644 index 00000000..8ecc85b5 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingDelete.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects; + +public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java new file mode 100644 index 00000000..065224f6 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWrite.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects; + +public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java new file mode 100644 index 00000000..1476e167 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PendingWriteEntry.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects; + +public interface PendingWriteEntry { + long bundleId(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java index 4f2651c4..a6836983 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -53,4 +53,11 @@ public class PredicateKvIterator, V, V_T> implements Clo return ret; } + @Override + public String toString() { + return "PredicateKvIterator{" + + "_backing=" + _backing + + ", _next=" + _next + + '}'; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java index 4b4f6df2..bf9691c3 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java @@ -43,7 +43,6 @@ public class SelfRefreshingKvIterator, V> implements Clo return; } long newVersion = _versionSupplier.get(); - Log.tracev("Refreshing iterator last refreshed {0}, current version {1}", _curVersion, newVersion); oldBacking = _backing; _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _next.getKey())); var next = _backing.hasNext() ? _backing.next() : null; @@ -56,6 +55,8 @@ public class SelfRefreshingKvIterator, V> implements Clo " current version {1}, current value {2}, read value {3}", _curVersion, newVersion, _next, next); assert false; } + Log.tracev("Refreshed iterator last refreshed {0}, current version {1}, old value {2}, new value {3}", + _curVersion, newVersion, _next, next); _next = next; _curVersion = newVersion; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index c34c7737..e1aa640a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -20,7 +20,7 @@ import java.util.function.Consumer; @ApplicationScoped public class SnapshotManager { @Inject - WritebackObjectPersistentStore delegateStore; + WritebackObjectPersistentStore writebackStore; private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); @@ -64,7 +64,7 @@ public class SnapshotManager { if (!_snapshotIds.isEmpty()) { verify(); for (var action : writes) { - var current = delegateStore.readObjectVerbose(action.key()); + var current = writebackStore.readObjectVerbose(action.key()); // Add to snapshot the previous visible version of the replaced object // I.e. should be visible to all transactions with id <= id // and at least as its corresponding version @@ -75,13 +75,13 @@ public class SnapshotManager { Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), data.map(o -> new SnapshotEntryObject(o, id)).orElse(new SnapshotEntryDeleted(id))); case WritebackObjectPersistentStore.VerboseReadResultPending( - TxWriteback.PendingWriteEntry pending + PendingWriteEntry pending ) -> { assert pending.bundleId() < id; yield switch (pending) { - case TxWriteback.PendingWrite write -> + case PendingWrite write -> Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), id)); - case TxWriteback.PendingDelete delete -> + case PendingDelete delete -> Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(id)); default -> throw new IllegalStateException("Unexpected value: " + pending); }; @@ -114,7 +114,7 @@ public class SnapshotManager { // Commit under lock, iterators will see new version after the lock is released and writeback // cache is updated // TODO: Maybe writeback iterator being invalidated wouldn't be a problem? - return delegateStore.commitTx(writes, id); + return writebackStore.commitTx(writes, id); } finally { _lock.writeLock().unlock(); } @@ -345,13 +345,10 @@ public class SnapshotManager { // be served instead. 
Note that refreshing the iterator will also refresh the writeback iterator, // so it also should be consistent. return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>((params) -> - new TombstoneMergingKvIterator<>(new SnapshotKvIterator(params.getLeft(), params.getRight()), - new MappingKvIterator<>(delegateStore.getIterator(params.getLeft(), params.getRight()), d -> switch (d) { - case TombstoneMergingKvIterator.Tombstone() -> d; - case TombstoneMergingKvIterator.Data data -> - data.value().version() <= _id ? data : new TombstoneMergingKvIterator.Tombstone<>(); - default -> throw new IllegalStateException("Unexpected value: " + d); - })), _snapshotVersion::get, _lock.readLock(), start, key)); + new TombstoneMergingKvIterator<>("snapshot", new SnapshotKvIterator(params.getLeft(), params.getRight()), + new MappingKvIterator<>(writebackStore.getIterator(params.getLeft(), params.getRight()), d -> + d.version() <= _id ? new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>() + )), _snapshotVersion::get, _lock.readLock(), start, key)); } public CloseableKvIterator getIterator(JObjectKey key) { @@ -387,6 +384,6 @@ public class SnapshotManager { @Nonnull Optional readObjectDirect(JObjectKey name) { - return delegateStore.readObject(name); + return writebackStore.readObject(name); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java index 90fbe455..4d84d357 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -1,16 +1,20 @@ package com.usatiuk.dhfs.objects; +import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; import java.util.List; public class TombstoneMergingKvIterator, V> implements CloseableKvIterator { private final CloseableKvIterator _backing; + private final String _name; - public TombstoneMergingKvIterator(List>> iterators) { + public TombstoneMergingKvIterator(String name, List>> iterators) { + _name = name; _backing = new PredicateKvIterator<>( - new MergingKvIterator<>(iterators), + new MergingKvIterator<>(name + "-merging", iterators), pair -> { + Log.tracev("{0} - Processing pair {1}", _name, pair); if (pair instanceof Tombstone) { return null; } @@ -19,8 +23,8 @@ public class TombstoneMergingKvIterator, V> implements C } @SafeVarargs - public TombstoneMergingKvIterator(CloseableKvIterator>... iterators) { - this(List.of(iterators)); + public TombstoneMergingKvIterator(String name, CloseableKvIterator>... 
iterators) { + this(name, List.of(iterators)); } public interface DataType { @@ -51,4 +55,12 @@ public class TombstoneMergingKvIterator, V> implements C public Pair next() { return _backing.next(); } + + @Override + public String toString() { + return "TombstoneMergingKvIterator{" + + "_backing=" + _backing + + ", _name='" + _name + '\'' + + '}'; + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java deleted file mode 100644 index 2c50bb46..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWriteback.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import com.usatiuk.dhfs.objects.persistence.IteratorStart; - -import java.util.Collection; -import java.util.Optional; - -public interface TxWriteback { - TxBundle createBundle(); - - void commitBundle(TxBundle bundle); - - void dropBundle(TxBundle bundle); - - void fence(long bundleId); - - Optional getPendingWrite(JObjectKey key); - - Collection getPendingWrites(); - - // Executes callback after bundle with bundleId id has been persisted - // if it was already, runs callback on the caller thread - void asyncFence(long bundleId, Runnable callback); - - interface PendingWriteEntry { - long bundleId(); - } - - record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry { - } - - record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry { - } - - CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key); - - default CloseableKvIterator> getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java deleted file mode 100644 index 1926ca95..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxWritebackImpl.java +++ /dev/null @@ -1,408 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; -import com.usatiuk.dhfs.objects.persistence.IteratorStart; -import com.usatiuk.dhfs.objects.persistence.TxManifestObj; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.commons.lang3.tuple.Pair; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -@ApplicationScoped -public class TxWritebackImpl implements TxWriteback { - private final LinkedList _pendingBundles = new LinkedList<>(); - private final ReentrantReadWriteLock _pendingBundlesVersionLock = new ReentrantReadWriteLock(); - - private final ConcurrentSkipListMap _pendingWrites = new ConcurrentSkipListMap<>(); - private final AtomicLong _pendingWritesVersion = new AtomicLong(); - private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); - - private final Object 
_flushWaitSynchronizer = new Object(); - private final AtomicLong _lastWrittenTx = new AtomicLong(-1); - private final AtomicLong _counter = new AtomicLong(); - private final AtomicLong _waitedTotal = new AtomicLong(0); - @Inject - CachingObjectPersistentStore objectPersistentStore; - @ConfigProperty(name = "dhfs.objects.writeback.limit") - long sizeLimit; - private long currentSize = 0; - private ExecutorService _writebackExecutor; - private ExecutorService _statusExecutor; - private volatile boolean _ready = false; - - void init(@Observes @Priority(110) StartupEvent event) { - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("tx-writeback-%d") - .build(); - - _writebackExecutor = Executors.newSingleThreadExecutor(factory); - _writebackExecutor.submit(this::writeback); - } - - _statusExecutor = Executors.newSingleThreadExecutor(); - _statusExecutor.submit(() -> { - try { - while (true) { - Thread.sleep(1000); - if (currentSize > 0) - Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB"); - } - } catch (InterruptedException ignored) { - } - }); - _ready = true; - } - - void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { - Log.info("Waiting for all transactions to drain"); - - synchronized (_flushWaitSynchronizer) { - _ready = false; - while (currentSize > 0) { - _flushWaitSynchronizer.wait(); - } - } - - _writebackExecutor.shutdownNow(); - Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); - } - - private void verifyReady() { - if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); - } - - private void writeback() { - while (!Thread.interrupted()) { - try { - TxBundleImpl bundle = new TxBundleImpl(0); - synchronized (_pendingBundles) { - while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) - _pendingBundles.wait(); - - long diff = 0; - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - bundle.compress(toCompress); - } - diff += bundle.calculateTotalSize(); - synchronized (_flushWaitSynchronizer) { - currentSize += diff; - } - } - - var toWrite = new ArrayList>(); - var toDelete = new ArrayList(); - - for (var e : bundle._entries.values()) { - switch (e) { - case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { - Log.trace("Writing new " + key); - toWrite.add(Pair.of(key, data)); - } - case TxBundleImpl.DeletedEntry(JObjectKey key) -> { - Log.trace("Deleting from persistent storage " + key); - toDelete.add(key); - } - default -> throw new IllegalStateException("Unexpected value: " + e); - } - } - - objectPersistentStore.commitTx( - new TxManifestObj<>( - Collections.unmodifiableList(toWrite), - Collections.unmodifiableList(toDelete) - )); - - Log.trace("Bundle " + bundle.getId() + " committed"); - - // Remove from pending writes, after real commit - synchronized (_pendingBundles) { - bundle._entries.values().forEach(e -> { - var cur = _pendingWrites.get(e.key()); - if (cur.bundleId() <= bundle.getId()) - _pendingWrites.remove(e.key(), cur); - }); - // No need to increment version - } - - List> callbacks = new ArrayList<>(); - synchronized (_notFlushedBundles) { - _lastWrittenTx.set(bundle.getId()); - while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { - callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); - } - } - 
callbacks.forEach(l -> l.forEach(Runnable::run)); - - synchronized (_flushWaitSynchronizer) { - currentSize -= bundle.calculateTotalSize(); - // FIXME: - if (currentSize <= sizeLimit || !_ready) - _flushWaitSynchronizer.notifyAll(); - } - } catch (InterruptedException ignored) { - } catch (Exception e) { - Log.error("Uncaught exception in writeback", e); - } catch (Throwable o) { - Log.error("Uncaught THROWABLE in writeback", o); - } - } - Log.info("Writeback thread exiting"); - } - - @Override - public TxBundle createBundle() { - verifyReady(); - boolean wait = false; - while (true) { - if (wait) { - synchronized (_flushWaitSynchronizer) { - long started = System.currentTimeMillis(); - while (currentSize > sizeLimit) { - try { - _flushWaitSynchronizer.wait(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - long waited = System.currentTimeMillis() - started; - _waitedTotal.addAndGet(waited); - if (Log.isTraceEnabled()) - Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); - wait = false; - } - } - synchronized (_pendingBundles) { - synchronized (_flushWaitSynchronizer) { - if (currentSize > sizeLimit) { - if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var target = _pendingBundles.poll(); - - long diff = -target.calculateTotalSize(); - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - target.compress(toCompress); - } - diff += target.calculateTotalSize(); - currentSize += diff; - _pendingBundles.addFirst(target); - } - } - - if (currentSize > sizeLimit) { - wait = true; - continue; - } - } - synchronized (_notFlushedBundles) { - var bundle = new TxBundleImpl(_counter.incrementAndGet()); - _pendingBundles.addLast(bundle); - _notFlushedBundles.put(bundle.getId(), bundle); - return bundle; - } - } - } - } - - @Override - public void commitBundle(TxBundle bundle) { - verifyReady(); - _pendingBundlesVersionLock.writeLock().lock(); - try { - synchronized (_pendingBundles) { - ((TxBundleImpl) bundle).setReady(); - ((TxBundleImpl) bundle)._entries.values().forEach(e -> { - switch (e) { - case TxBundleImpl.CommittedEntry c -> - _pendingWrites.put(c.key(), new PendingWrite(c.data, bundle.getId())); - case TxBundleImpl.DeletedEntry d -> - _pendingWrites.put(d.key(), new PendingDelete(d.key, bundle.getId())); - default -> throw new IllegalStateException("Unexpected value: " + e); - } - }); - _pendingWritesVersion.incrementAndGet(); - if (_pendingBundles.peek() == bundle) - _pendingBundles.notify(); - synchronized (_flushWaitSynchronizer) { - currentSize += ((TxBundleImpl) bundle).calculateTotalSize(); - } - } - } finally { - _pendingBundlesVersionLock.writeLock().unlock(); - } - } - - @Override - public void dropBundle(TxBundle bundle) { - verifyReady(); - synchronized (_pendingBundles) { - Log.warn("Dropped bundle: " + bundle); - _pendingBundles.remove((TxBundleImpl) bundle); - synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundleImpl) bundle).calculateTotalSize(); - } - } - } - - @Override - public void fence(long bundleId) { - var latch = new CountDownLatch(1); - asyncFence(bundleId, latch::countDown); - try { - latch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - @Override - public Optional getPendingWrite(JObjectKey key) { - synchronized (_pendingBundles) { - return Optional.ofNullable(_pendingWrites.get(key)); - } - } - - 
@Override - public Collection getPendingWrites() { - synchronized (_pendingBundles) { - return Collections.unmodifiableCollection(_pendingWrites.values()); - } - } - - @Override - public void asyncFence(long bundleId, Runnable fn) { - verifyReady(); - if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); - if (_lastWrittenTx.get() >= bundleId) { - fn.run(); - return; - } - synchronized (_notFlushedBundles) { - if (_lastWrittenTx.get() >= bundleId) { - fn.run(); - return; - } - _notFlushedBundles.get(bundleId).addCallback(fn); - } - } - - private class TxBundleImpl implements TxBundle { - private final LinkedHashMap _entries = new LinkedHashMap<>(); - private final ArrayList _callbacks = new ArrayList<>(); - private long _txId; - private volatile boolean _ready = false; - private long _size = -1; - private boolean _wasCommitted = false; - - private TxBundleImpl(long txId) { - _txId = txId; - } - - @Override - public long getId() { - return _txId; - } - - public void setReady() { - _ready = true; - } - - public void addCallback(Runnable callback) { - synchronized (_callbacks) { - if (_wasCommitted) throw new IllegalStateException(); - _callbacks.add(callback); - } - } - - public List setCommitted() { - synchronized (_callbacks) { - _wasCommitted = true; - return Collections.unmodifiableList(_callbacks); - } - } - - @Override - public void commit(JDataVersionedWrapper obj) { - synchronized (_entries) { - _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize())); - } - } - - @Override - public void delete(JObjectKey obj) { - synchronized (_entries) { - _entries.put(obj, new DeletedEntry(obj)); - } - } - - public long calculateTotalSize() { - if (_size >= 0) return _size; - _size = _entries.values().stream().mapToInt(BundleEntry::size).sum(); - return _size; - } - - public void compress(TxBundleImpl other) { - if (_txId >= other._txId) - throw new IllegalArgumentException("Compressing an older bundle into newer"); - - _txId = other._txId; - _size = -1; - - _entries.putAll(other._entries); - } - - private interface BundleEntry { - JObjectKey key(); - - int size(); - } - - private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) - implements BundleEntry { - } - - private record DeletedEntry(JObjectKey key) - implements BundleEntry { - @Override - public int size() { - return 64; - } - } - } - - // Returns an iterator with a view of all commited objects - // Does not have to guarantee consistent view, snapshots are handled by upper layers - // Invalidated by commitBundle, but might return data after it has been really committed - @Override - public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { - _pendingBundlesVersionLock.readLock().lock(); - try { - return new InvalidatableKvIterator<>(new MappingKvIterator<>( - new NavigableMapKvIterator<>(_pendingWrites, start, key), - e -> switch (e) { - case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); - case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); - default -> throw new IllegalStateException("Unexpected value: " + e); - }), _pendingWritesVersion::get, _pendingBundlesVersionLock.readLock()); - } finally { - _pendingBundlesVersionLock.readLock().unlock(); - } - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 4adc4489..10aeb598 100644 
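A note on the refactor here: the deleted TxWriteback interface and TxWritebackImpl above are folded directly into WritebackObjectPersistentStore in the diff that follows. The one contract worth calling out is the fence: run a callback once the bundle with a given id has been persisted, or immediately on the caller thread if it already has been. The following sketch is distilled from the code being moved; the class and method names are illustrative, not taken from the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

class FenceSketch {
    private final AtomicLong lastWrittenTx = new AtomicLong(-1);
    private final TreeMap<Long, List<Runnable>> notFlushed = new TreeMap<>();

    // Run fn once bundleId is durable; immediately if it already is.
    void asyncFence(long bundleId, Runnable fn) {
        if (lastWrittenTx.get() >= bundleId) { fn.run(); return; }
        synchronized (notFlushed) {
            // Re-check under the lock so a concurrent flush can't be missed.
            if (lastWrittenTx.get() >= bundleId) { fn.run(); return; }
            notFlushed.computeIfAbsent(bundleId, k -> new ArrayList<>()).add(fn);
        }
    }

    // Called by the writeback thread after a bundle has been persisted.
    void onPersisted(long bundleId) {
        List<Runnable> ready = new ArrayList<>();
        synchronized (notFlushed) {
            lastWrittenTx.set(bundleId);
            while (!notFlushed.isEmpty() && notFlushed.firstKey() <= bundleId)
                ready.addAll(notFlushed.pollFirstEntry().getValue());
        }
        ready.forEach(Runnable::run); // callbacks run outside the lock
    }
}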
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -2,49 +2,394 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.CachingObjectPersistentStore; import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.persistence.TxManifestObj; import com.usatiuk.dhfs.objects.transaction.TxRecord; import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; +import org.apache.commons.lang3.concurrent.BasicThreadFactory; +import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.PSortedMap; +import org.pcollections.TreePMap; import javax.annotation.Nonnull; -import java.util.Collection; -import java.util.HashSet; -import java.util.Optional; +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; @ApplicationScoped public class WritebackObjectPersistentStore { - @Inject - CachingObjectPersistentStore delegate; - @Inject - TxWriteback txWriteback; - private final AtomicLong _commitCounter = new AtomicLong(0); - private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + private final LinkedList _pendingBundles = new LinkedList<>(); - @Nonnull - public Collection findAllObjects() { - var pending = txWriteback.getPendingWrites(); - var found = new HashSet<>(delegate.findAllObjects()); - for (var p : pending) { - switch (p) { - case TxWriteback.PendingWrite write -> found.add(write.data().data().key()); - case TxWriteback.PendingDelete deleted -> found.remove(deleted.key()); - default -> throw new IllegalStateException("Unexpected value: " + p); + private final AtomicReference> _pendingWrites = new AtomicReference<>(TreePMap.empty()); + private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock(); + private final AtomicLong _pendingWritesVersion = new AtomicLong(); + private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); + + private final Object _flushWaitSynchronizer = new Object(); + private final AtomicLong _lastWrittenTx = new AtomicLong(-1); + private final AtomicLong _counter = new AtomicLong(); + private final AtomicLong _waitedTotal = new AtomicLong(0); + @Inject + CachingObjectPersistentStore cachedStore; + @ConfigProperty(name = "dhfs.objects.writeback.limit") + long sizeLimit; + private long currentSize = 0; + private ExecutorService _writebackExecutor; + private ExecutorService _statusExecutor; + private volatile boolean _ready = false; + + void init(@Observes @Priority(110) StartupEvent event) { + { + BasicThreadFactory factory = new BasicThreadFactory.Builder() + .namingPattern("tx-writeback-%d") + .build(); + + _writebackExecutor = Executors.newSingleThreadExecutor(factory); + _writebackExecutor.submit(this::writeback); + } + + _statusExecutor = Executors.newSingleThreadExecutor(); + _statusExecutor.submit(() -> { + try { + while (true) { + 
Thread.sleep(1000); + if (currentSize > 0) + Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB"); + } + } catch (InterruptedException ignored) { + } + }); + _ready = true; + } + + void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { + Log.info("Waiting for all transactions to drain"); + + synchronized (_flushWaitSynchronizer) { + _ready = false; + while (currentSize > 0) { + _flushWaitSynchronizer.wait(); } } - return found; + + _writebackExecutor.shutdownNow(); + Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); + } + + private void verifyReady() { + if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); + } + + private void writeback() { + while (!Thread.interrupted()) { + try { + TxBundleImpl bundle = new TxBundleImpl(0); + synchronized (_pendingBundles) { + while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) + _pendingBundles.wait(); + + long diff = 0; + while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { + var toCompress = _pendingBundles.poll(); + diff -= toCompress.calculateTotalSize(); + bundle.compress(toCompress); + } + diff += bundle.calculateTotalSize(); + synchronized (_flushWaitSynchronizer) { + currentSize += diff; + } + } + + var toWrite = new ArrayList>(); + var toDelete = new ArrayList(); + + for (var e : bundle._entries.values()) { + switch (e) { + case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { + Log.trace("Writing new " + key); + toWrite.add(Pair.of(key, data)); + } + case TxBundleImpl.DeletedEntry(JObjectKey key) -> { + Log.trace("Deleting from persistent storage " + key); + toDelete.add(key); + } + default -> throw new IllegalStateException("Unexpected value: " + e); + } + } + + cachedStore.commitTx( + new TxManifestObj<>( + Collections.unmodifiableList(toWrite), + Collections.unmodifiableList(toDelete) + )); + + Log.trace("Bundle " + bundle.getId() + " committed"); + + // Remove from pending writes, after real commit + synchronized (_pendingBundles) { + var curPw = _pendingWrites.get(); + for (var e : bundle._entries.values()) { + var cur = curPw.get(e.key()); + if (cur.bundleId() <= bundle.getId()) + curPw = curPw.minus(e.key()); + } + _pendingWrites.set(curPw); + // No need to increment version + } + + List> callbacks = new ArrayList<>(); + synchronized (_notFlushedBundles) { + _lastWrittenTx.set(bundle.getId()); + while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { + callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); + } + } + callbacks.forEach(l -> l.forEach(Runnable::run)); + + synchronized (_flushWaitSynchronizer) { + currentSize -= bundle.calculateTotalSize(); + // FIXME: + if (currentSize <= sizeLimit || !_ready) + _flushWaitSynchronizer.notifyAll(); + } + } catch (InterruptedException ignored) { + } catch (Exception e) { + Log.error("Uncaught exception in writeback", e); + } catch (Throwable o) { + Log.error("Uncaught THROWABLE in writeback", o); + } + } + Log.info("Writeback thread exiting"); + } + + + public TxBundle createBundle() { + verifyReady(); + boolean wait = false; + while (true) { + if (wait) { + synchronized (_flushWaitSynchronizer) { + long started = System.currentTimeMillis(); + while (currentSize > sizeLimit) { + try { + _flushWaitSynchronizer.wait(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + long waited = System.currentTimeMillis() - 
started;
+                    _waitedTotal.addAndGet(waited);
+                    if (Log.isTraceEnabled())
+                        Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms");
+                    wait = false;
+                }
+            }
+            synchronized (_pendingBundles) {
+                synchronized (_flushWaitSynchronizer) {
+                    if (currentSize > sizeLimit) {
+                        if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
+                            var target = _pendingBundles.poll();
+
+                            long diff = -target.calculateTotalSize();
+                            while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
+                                var toCompress = _pendingBundles.poll();
+                                diff -= toCompress.calculateTotalSize();
+                                target.compress(toCompress);
+                            }
+                            diff += target.calculateTotalSize();
+                            currentSize += diff;
+                            _pendingBundles.addFirst(target);
+                        }
+                    }
+
+                    if (currentSize > sizeLimit) {
+                        wait = true;
+                        continue;
+                    }
+                }
+                synchronized (_notFlushedBundles) {
+                    var bundle = new TxBundleImpl(_counter.incrementAndGet());
+                    _pendingBundles.addLast(bundle);
+                    _notFlushedBundles.put(bundle.getId(), bundle);
+                    return bundle;
+                }
+            }
+        }
+    }
+
+    public void commitBundle(TxBundle bundle) {
+        verifyReady();
+        _pendingWritesVersionLock.writeLock().lock();
+        try {
+            var curPw = _pendingWrites.get();
+            for (var e : ((TxBundleImpl) bundle)._entries.values()) {
+                switch (e) {
+                    case TxBundleImpl.CommittedEntry c -> {
+                        curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId()));
+                    }
+                    case TxBundleImpl.DeletedEntry d -> {
+                        curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId()));
+                    }
+                    default -> throw new IllegalStateException("Unexpected value: " + e);
+                }
+            }
+            _pendingWrites.set(curPw);
+            synchronized (_pendingBundles) {
+                ((TxBundleImpl) bundle).setReady();
+                _pendingWritesVersion.incrementAndGet();
+                if (_pendingBundles.peek() == bundle)
+                    _pendingBundles.notify();
+                synchronized (_flushWaitSynchronizer) {
+                    currentSize += ((TxBundleImpl) bundle).calculateTotalSize();
+                }
+            }
+        } finally {
+            _pendingWritesVersionLock.writeLock().unlock();
+        }
+    }
+
+    public void dropBundle(TxBundle bundle) {
+        verifyReady();
+        synchronized (_pendingBundles) {
+            Log.warn("Dropped bundle: " + bundle);
+            _pendingBundles.remove((TxBundleImpl) bundle);
+            synchronized (_flushWaitSynchronizer) {
+                currentSize -= ((TxBundleImpl) bundle).calculateTotalSize();
+            }
+        }
+    }
+
+    public void fence(long bundleId) {
+        var latch = new CountDownLatch(1);
+        asyncFence(bundleId, latch::countDown);
+        try {
+            latch.await();
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void asyncFence(long bundleId, Runnable fn) {
+        verifyReady();
+        if (bundleId <= 0) throw new IllegalArgumentException("bundleId should be > 0!");
+        if (_lastWrittenTx.get() >= bundleId) {
+            fn.run();
+            return;
+        }
+        synchronized (_notFlushedBundles) {
+            if (_lastWrittenTx.get() >= bundleId) {
+                fn.run();
+                return;
+            }
+            _notFlushedBundles.get(bundleId).addCallback(fn);
+        }
+    }
+
+    private static class TxBundleImpl implements TxBundle {
+        private final LinkedHashMap _entries = new LinkedHashMap<>();
+        private final ArrayList _callbacks = new ArrayList<>();
+        private long _txId;
+        private volatile boolean _ready = false;
+        private long _size = -1;
+        private boolean _wasCommitted = false;
+
+        private TxBundleImpl(long txId) {
+            _txId = txId;
+        }
+
+        public long getId() {
+            return _txId;
+        }
+
+        public void setReady() {
+            _ready = true;
+        }
+
+        public void addCallback(Runnable callback) {
+            synchronized (_callbacks) {
+                if (_wasCommitted) throw new IllegalStateException();
+                _callbacks.add(callback);
+            }
+        }
+
+        public List setCommitted() {
+            synchronized (_callbacks) {
+                _wasCommitted = true;
+                return Collections.unmodifiableList(_callbacks);
+            }
+        }
+
+        public void commit(JDataVersionedWrapper obj) {
+            synchronized (_entries) {
+                _entries.put(obj.data().key(), new CommittedEntry(obj.data().key(), obj, obj.data().estimateSize()));
+            }
+        }
+
+        public void delete(JObjectKey obj) {
+            synchronized (_entries) {
+                _entries.put(obj, new DeletedEntry(obj));
+            }
+        }
+
+        public long calculateTotalSize() {
+            if (_size >= 0) return _size;
+            _size = _entries.values().stream().mapToInt(BundleEntry::size).sum();
+            return _size;
+        }
+
+        public void compress(TxBundleImpl other) {
+            if (_txId >= other._txId)
+                throw new IllegalArgumentException("Compressing an older bundle into a newer one");
+
+            _txId = other._txId;
+            _size = -1;
+
+            _entries.putAll(other._entries);
+        }
+
+        private interface BundleEntry {
+            JObjectKey key();
+
+            int size();
+        }
+
+        private record CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size)
+                implements BundleEntry {
+        }
+
+        private record DeletedEntry(JObjectKey key)
+                implements BundleEntry {
+
+            public int size() {
+                return 64;
+            }
+        }
+    }
+
+    public Optional getPendingWrite(JObjectKey key) {
+        synchronized (_pendingBundles) {
+            return Optional.ofNullable(_pendingWrites.get().get(key));
+        }
     }
 
     @Nonnull
     Optional readObject(JObjectKey name) {
-        var pending = txWriteback.getPendingWrite(name).orElse(null);
+        var pending = getPendingWrite(name).orElse(null);
         return switch (pending) {
-            case TxWriteback.PendingWrite write -> Optional.of(write.data());
-            case TxWriteback.PendingDelete ignored -> Optional.empty();
-            case null -> delegate.readObject(name);
+            case PendingWrite write -> Optional.of(write.data());
+            case PendingDelete ignored -> Optional.empty();
+            case null -> cachedStore.readObject(name);
             default -> throw new IllegalStateException("Unexpected value: " + pending);
         };
     }
@@ -55,20 +400,20 @@ public class WritebackObjectPersistentStore {
     public record VerboseReadResultPersisted(Optional data) implements VerboseReadResult {
     }
 
-    public record VerboseReadResultPending(TxWriteback.PendingWriteEntry pending) implements VerboseReadResult {
+    public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
    }
 
     @Nonnull
     VerboseReadResult readObjectVerbose(JObjectKey key) {
-        var pending = txWriteback.getPendingWrite(key).orElse(null);
+        var pending = getPendingWrite(key).orElse(null);
         if (pending != null) {
             return new VerboseReadResultPending(pending);
         }
-        return new VerboseReadResultPersisted(delegate.readObject(key));
+        return new VerboseReadResultPersisted(cachedStore.readObject(key));
     }
 
     Consumer commitTx(Collection> writes, long id) {
-        var bundle = txWriteback.createBundle();
+        var bundle = createBundle();
         try {
             for (var action : writes) {
                 switch (action) {
@@ -86,34 +431,45 @@ public class WritebackObjectPersistentStore {
                 }
             }
         } catch (Throwable t) {
-            txWriteback.dropBundle(bundle);
+            dropBundle(bundle);
             throw new TxCommitException(t.getMessage(), t);
         }
 
         Log.tracef("Committing transaction %d to storage", id);
-        txWriteback.commitBundle(bundle);
-        _commitCounter.incrementAndGet();
+        commitBundle(bundle);
 
         long bundleId = bundle.getId();
 
-        return r -> txWriteback.asyncFence(bundleId, r);
+        return r -> asyncFence(bundleId, r);
     }
 
     // Returns an iterator with a view of all committed objects
     // Does not have to guarantee a consistent view, snapshots are handled by upper layers
-    // Should be refreshed after each commit
-    public CloseableKvIterator>
getIterator(IteratorStart start, JObjectKey key) { - _lock.readLock().lock(); + // Invalidated by commitBundle, but might return data after it has been really committed + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + _pendingWritesVersionLock.readLock().lock(); try { - return new InvalidatableKvIterator<>(new MergingKvIterator<>(txWriteback.getIterator(start, key), - new MappingKvIterator<>(delegate.getIterator(start, key), TombstoneMergingKvIterator.Data::new)), - _commitCounter::get, _lock.readLock()); + CloseableKvIterator> oursIterator = new MappingKvIterator<>( + new NavigableMapKvIterator<>(_pendingWrites.get(), start, key), + e -> switch (e) { + case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); + case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + e); + }); + + return new InvalidatableKvIterator<>( + new InconsistentKvIteratorWrapper<>( + (p) -> + new TombstoneMergingKvIterator<>("writeback-ps", + oursIterator, + new MappingKvIterator<>(cachedStore.getIterator(p.getLeft(), p.getRight()), TombstoneMergingKvIterator.Data::new)), start, key), + _pendingWritesVersion::get, _pendingWritesVersionLock.readLock()); } finally { - _lock.readLock().unlock(); + _pendingWritesVersionLock.readLock().unlock(); } } - public CloseableKvIterator> getIterator(JObjectKey key) { + public CloseableKvIterator getIterator(JObjectKey key) { return getIterator(IteratorStart.GE, key); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 1d8d400e..c7fbeb6d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -125,6 +125,7 @@ public class CachingObjectPersistentStore { assert added; } } + Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); delegate.commitTx(serialized); // Now, reading from the backing store should return the new data synchronized (_cache) { @@ -135,6 +136,7 @@ public class CachingObjectPersistentStore { } } _cacheVersion.incrementAndGet(); + Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); } finally { _cacheVersionLock.writeLock().unlock(); } @@ -166,6 +168,7 @@ public class CachingObjectPersistentStore { @Override public Pair next() { var next = _delegate.next(); + Log.tracev("Caching: {0}", next); put(next.getKey(), Optional.of(next.getValue())); return next; } @@ -176,13 +179,21 @@ public class CachingObjectPersistentStore { // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, // if some objects are still in writeback public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new InconsistentSelfRefreshingKvIterator<>( - (bp) -> new MergingKvIterator<>( - new PredicateKvIterator<>( - new NavigableMapKvIterator<>(_sortedCache, bp.getLeft(), bp.getRight()), - e -> e.object().orElse(null) - ), new CachingKvIterator(delegate.getIterator(bp.getLeft(), bp.getRight()))), _cacheVersion::get, - _cacheVersionLock.readLock(), start, key); + _cacheVersionLock.readLock().lock(); + try { + return new InconsistentSelfRefreshingKvIterator<>( + 
(bp) -> new MergingKvIterator<>("cache", + new PredicateKvIterator<>( + new NavigableMapKvIterator<>(_sortedCache, bp.getLeft(), bp.getRight()), + e -> { + Log.tracev("Taken from cache: {0}", e); + return e.object().orElse(null); + } + ), new CachingKvIterator(delegate.getIterator(bp.getLeft(), bp.getRight()))), _cacheVersion::get, + _cacheVersionLock.readLock(), start, key); + } finally { + _cacheVersionLock.readLock().unlock(); + } } public CloseableKvIterator getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java index ed5e9d44..2fa4e8fa 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -1,6 +1,8 @@ package com.usatiuk.dhfs.objects; +import java.util.Arrays; import java.util.concurrent.Callable; +import java.util.concurrent.Executors; public abstract class Just { public static void run(Callable callable) { @@ -12,4 +14,26 @@ public abstract class Just { } }).start(); } + + public static void runAll(Callable... callables) { + try { + try (var exs = Executors.newFixedThreadPool(callables.length)) { + exs.invokeAll(Arrays.stream(callables).map(c -> (Callable) () -> { + try { + return c.call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }).toList()).forEach(f -> { + try { + f.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java index 6166bb8c..2f7db033 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -74,7 +74,7 @@ public class MergingKvIteratorTest { public void testSimple() { var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator(); var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator(); - var mergingIterator = new MergingKvIterator<>(new SimpleIteratorWrapper<>(source1), new SimpleIteratorWrapper<>(source2)); + var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1), new SimpleIteratorWrapper<>(source2)); var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7)); for (var pair : expected) { Assertions.assertTrue(mergingIterator.hasNext()); @@ -86,7 +86,7 @@ public class MergingKvIteratorTest { public void testPriority() { var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); - var mergingIterator = new MergingKvIterator<>(new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); + var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); for (var pair : expected) { Assertions.assertTrue(mergingIterator.hasNext()); @@ -94,7 +94,7 @@ public class MergingKvIteratorTest { } Assertions.assertFalse(mergingIterator.hasNext()); - var mergingIterator2 = new MergingKvIterator<>(new 
SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); + var mergingIterator2 = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); for (var pair : expected2) { Assertions.assertTrue(mergingIterator2.hasNext()); @@ -102,4 +102,25 @@ public class MergingKvIteratorTest { } Assertions.assertFalse(mergingIterator2.hasNext()); } + + @Test + public void testPriority2() { + var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6)); + var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5)); + var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); + var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); + var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 7d9ecab1..5b8538a7 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; @@ -442,6 +443,139 @@ public class ObjectsTest { deleteAndCheck(new JObjectKey(key)); } + @RepeatedTest(100) + void simpleIterator1() throws Exception { + var key = "SimpleIterator1"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + }); + } + + @RepeatedTest(100) + void simpleIterator2() throws Exception { + var key = "SimpleIterator2"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); 
+ curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + }); + } + + @RepeatedTest(100) + void concurrentIterator1() { + var key = "ConcurrentIterator1"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + } + // } // // @Test diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java index f8b7fcf2..0ec03042 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects.repository.peersync; +import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.PeerId; 
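The PeerInfo hunk below swaps the record's cert component from byte[] to protobuf's ByteString. The patch itself gives no rationale; one plausible motivation is that record-generated equals()/hashCode() compare an array component by reference, while ByteString is immutable and compares by content, so two PeerInfo instances carrying the same certificate bytes actually compare equal. A small self-contained illustration (the type names here are made up for the demo):

import com.google.protobuf.ByteString;

public class CertEqualityDemo {
    record ArrayCert(byte[] cert) {}
    record ByteStringCert(ByteString cert) {}

    public static void main(String[] args) {
        byte[] bytes = {1, 2, 3};
        // Equal-but-distinct arrays: the generated equals() uses reference equality.
        System.out.println(new ArrayCert(bytes.clone()).equals(new ArrayCert(bytes.clone()))); // false
        // ByteString copies compare by content, so these records are equal.
        System.out.println(new ByteStringCert(ByteString.copyFrom(bytes))
                .equals(new ByteStringCert(ByteString.copyFrom(bytes)))); // true
    }
}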
@@ -7,12 +8,12 @@ import com.usatiuk.dhfs.objects.repository.CertificateTools; import java.security.cert.X509Certificate; -public record PeerInfo(JObjectKey key, PeerId id, byte[] cert) implements JDataRemote { +public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JDataRemote { public PeerInfo(PeerId id, byte[] cert) { - this(id.toJObjectKey(), id, cert); + this(id.toJObjectKey(), id, ByteString.copyFrom(cert)); } public X509Certificate parsedCert() { - return CertificateTools.certFromBytes(cert); + return CertificateTools.certFromBytes(cert.toByteArray()); } } From 74e5ee0925484b2241d02f3f380054ced0030147 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 16:38:36 +0100 Subject: [PATCH 096/105] more iterator tests --- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 154 ++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 5b8538a7..3fb5daf9 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -574,6 +574,160 @@ public class ObjectsTest { curTx.delete(new JObjectKey(key3)); curTx.delete(new JObjectKey(key4)); }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + }); + } + + @RepeatedTest(100) + void concurrentIterator2() { + var key = "ConcurrentIterator2"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key2), "John5")); + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John5", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + 
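// A note on the concurrentIterator tests added below (annotation, not part of the
// patch): the two CyclicBarriers force a deterministic interleaving. The first
// barrier ensures thread 2's transaction is open before thread 1 even begins its
// own; the second then releases thread 1 to commit its puts/deletes while thread 2
// is already iterating. Thread 2 must still observe the pre-commit state, i.e. the
// iterator serves the snapshot its transaction started from; a follow-up
// transaction then checks that the committed writes are visible.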
}); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key2)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + }); + } + + @RepeatedTest(100) + void concurrentIterator3() { + var key = "ConcurrentIterator3"; + var key1 = key + "_1"; + var key2 = key + "_2"; + var key3 = key + "_3"; + var key4 = key + "_4"; + txm.run(() -> { + curTx.put(new Parent(JObjectKey.of(key), "John")); + curTx.put(new Parent(JObjectKey.of(key1), "John1")); + curTx.put(new Parent(JObjectKey.of(key2), "John2")); + curTx.put(new Parent(JObjectKey.of(key4), "John4")); + }); + var barrier = new CyclicBarrier(2); + var barrier2 = new CyclicBarrier(2); + Just.runAll(() -> { + barrier.await(); + txm.run(() -> { + Log.info("Thread 1 starting tx"); + try { + barrier2.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + curTx.put(new Parent(JObjectKey.of(key3), "John3")); + curTx.delete(new JObjectKey(key2)); + Log.info("Thread 1 committing"); + }); + Log.info("Thread 1 commited"); + return null; + }, () -> { + txm.run(() -> { + Log.info("Thread 2 starting tx"); + try { + barrier.await(); + barrier2.await(); + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Log.info("Thread 2 finished"); + return null; + }); + Log.info("All threads finished"); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + }); + txm.run(() -> { + curTx.delete(new JObjectKey(key)); + curTx.delete(new JObjectKey(key1)); + curTx.delete(new JObjectKey(key3)); + curTx.delete(new JObjectKey(key4)); + }); + txm.run(() -> { + var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + }); } // } From 3720280cd7e97435f2ea4bdf102a2188c2746971 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 17:58:01 +0100 Subject: [PATCH 097/105] skip iterator --- .../dhfs/objects/CloseableKvIterator.java | 2 ++ .../InconsistentKvIteratorWrapper.java | 20 +++++++++++++++++++ .../InconsistentSelfRefreshingKvIterator.java | 14 +++++++++++++ .../dhfs/objects/InvalidatableKvIterator.java | 11 ++++++++++ .../dhfs/objects/MappingKvIterator.java | 5 +++++ .../dhfs/objects/MergingKvIterator.java | 15 ++++++++++++-- .../dhfs/objects/NavigableMapKvIterator.java | 9 +++++++++ .../dhfs/objects/PredicateKvIterator.java | 8 ++++++++ .../objects/SelfRefreshingKvIterator.java | 8 ++++++++ .../usatiuk/dhfs/objects/SnapshotManager.java | 13 ++++++++++++ .../objects/TombstoneMergingKvIterator.java | 5 +++++ .../CachingObjectPersistentStore.java | 6 ++++++ .../LmdbObjectPersistentStore.java | 5 +++++ 
.../SerializingObjectPersistentStore.java | 5 +++++ .../ReadTrackingObjectSourceFactory.java | 5 +++++ .../dhfs/objects/MergingKvIteratorTest.java | 9 +++++++++ 16 files changed, 138 insertions(+), 2 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java index 82227750..bcc3474c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java @@ -7,4 +7,6 @@ import java.util.Iterator; public interface CloseableKvIterator, V> extends Iterator>, AutoCloseableNoThrow { K peekNextKey(); + + void skip(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java index de5b6766..d3da5bfa 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java @@ -30,6 +30,7 @@ public class InconsistentKvIteratorWrapper, V> implement } private void refresh() { + Log.tracev("Refreshing iterator: {0}", _backing); _backing.close(); if (_peekedKey != null) { _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); @@ -69,6 +70,25 @@ public class InconsistentKvIteratorWrapper, V> implement } } + @Override + public void skip() { + while (true) { + try { + _lastReturnedKey = _backing.peekNextKey(); + _backing.skip(); + _peekedNext = false; + _peekedKey = null; + return; + } catch (NoSuchElementException ignored) { + assert !_peekedNext; + throw ignored; + } catch (StaleIteratorException ignored) { + refresh(); + continue; + } + } + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java index 41088677..c296f255 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java @@ -91,6 +91,20 @@ public class InconsistentSelfRefreshingKvIterator, V> im } } + @Override + public void skip() { + _lock.lock(); + try { + maybeRefresh(); + _lastReturnedKey = _backing.peekNextKey(); + _backing.skip(); + _peekedNext = false; + _peekedKey = null; + } finally { + _lock.unlock(); + } + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java index 712499c3..a83b36a4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java @@ -37,6 +37,17 @@ public class InvalidatableKvIterator, V> implements Clos } } + @Override + public void skip() { + _lock.lock(); + try { + checkVersion(); + _backing.skip(); + } finally { + _lock.unlock(); + } + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java index f131e4f9..6a374c34 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java @@ -18,6 +18,11 @@ public class MappingKvIterator, V, V_T> implements Close return _backing.peekNextKey(); } + @Override + public void skip() { + _backing.skip(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java index f1c8b1fe..72736b89 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -49,7 +49,7 @@ public class MergingKvIterator, V> implements CloseableK _sortedIterators.put(key, iterator); advanceIterator(them); } else { - iterator.next(); + iterator.skip(); advanceIterator(iterator); } } @@ -61,6 +61,17 @@ public class MergingKvIterator, V> implements CloseableK return _sortedIterators.firstKey(); } + @Override + public void skip() { + var cur = _sortedIterators.pollFirstEntry(); + if (cur == null) { + throw new NoSuchElementException(); + } + cur.getValue().skip(); + advanceIterator(cur.getValue()); + Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators); + } + @Override public void close() { for (CloseableKvIterator iterator : _iterators.keySet()) { @@ -81,7 +92,7 @@ public class MergingKvIterator, V> implements CloseableK } var curVal = cur.getValue().next(); advanceIterator(cur.getValue()); - Log.tracev("{0} Read: {1}, next: {2}", _name, curVal, _sortedIterators); + Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators); return curVal; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java index 1ced2e15..f14c2997 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java @@ -40,6 +40,15 @@ public class NavigableMapKvIterator, V> implements Close return _next.getKey(); } + @Override + public void skip() { + if (_next == null) { + throw new NoSuchElementException(); + } + _next = null; + fillNext(); + } + @Override public void close() { } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java index a6836983..b0be5636 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -33,6 +33,14 @@ public class PredicateKvIterator, V, V_T> implements Clo return _next.getKey(); } + @Override + public void skip() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + fillNext(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java index bf9691c3..f1d851d9 100644 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java @@ -91,6 +91,14 @@ public class SelfRefreshingKvIterator, V> implements Clo return _next.getKey(); } + @Override + public void skip() { + if (_next == null) { + throw new NoSuchElementException(); + } + prepareNext(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index e1aa640a..791b1221 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -280,6 +280,14 @@ public class SnapshotManager { return _next.getKey(); } + @Override + public void skip() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + fillNext(); + } + @Override public void close() { _backing.close(); @@ -321,6 +329,11 @@ public class SnapshotManager { return _backing.peekNextKey(); } + @Override + public void skip() { + _backing.skip(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java index 4d84d357..bcbbd681 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -41,6 +41,11 @@ public class TombstoneMergingKvIterator, V> implements C return _backing.peekNextKey(); } + @Override + public void skip() { + _backing.skip(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index c7fbeb6d..04a40286 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -155,6 +155,11 @@ public class CachingObjectPersistentStore { return _delegate.peekNextKey(); } + @Override + public void skip() { + _delegate.skip(); + } + @Override public void close() { _delegate.close(); @@ -179,6 +184,7 @@ public class CachingObjectPersistentStore { // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, // if some objects are still in writeback public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + Log.tracev("Getting cache iterator: {0}, {1}", start, key); _cacheVersionLock.readLock().lock(); try { return new InconsistentSelfRefreshingKvIterator<>( diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index aa7e7705..b8f7f270 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -171,6 +171,11 @@ 
public class LmdbObjectPersistentStore implements ObjectPersistentStore { return ret; } + @Override + public void skip() { + _hasNext = _cursor.next(); + } + @Override public JObjectKey peekNextKey() { if (!_hasNext) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index 4eef745b..2ccddadf 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -43,6 +43,11 @@ public class SerializingObjectPersistentStore { return _delegate.peekNextKey(); } + @Override + public void skip() { + _delegate.skip(); + } + @Override public void close() { _delegate.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java index 65985dc6..a163bac6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -83,6 +83,11 @@ public class ReadTrackingObjectSourceFactory { return _backing.peekNextKey(); } + @Override + public void skip() { + _backing.skip(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java index 2f7db033..45c73eea 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -33,6 +33,15 @@ public class MergingKvIteratorTest { return _next.getKey(); } + @Override + public void skip() { + if (_next == null) { + throw new NoSuchElementException(); + } + _next = null; + fillNext(); + } + @Override public void close() { } From d7a2627c932e0a2241ed8561f108a3b918fde524 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 18:43:08 +0100 Subject: [PATCH 098/105] race fix in writeback --- .../WritebackObjectPersistentStore.java | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 10aeb598..4597544d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -232,20 +232,20 @@ public class WritebackObjectPersistentStore { verifyReady(); _pendingWritesVersionLock.writeLock().lock(); try { - var curPw = _pendingWrites.get(); - for (var e : ((TxBundleImpl) bundle)._entries.values()) { - switch (e) { - case TxBundleImpl.CommittedEntry c -> { - curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId())); - } - case TxBundleImpl.DeletedEntry d -> { - curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId())); - } - default -> throw new IllegalStateException("Unexpected 
value: " + e); - } - } - _pendingWrites.set(curPw); synchronized (_pendingBundles) { + var curPw = _pendingWrites.get(); + for (var e : ((TxBundleImpl) bundle)._entries.values()) { + switch (e) { + case TxBundleImpl.CommittedEntry c -> { + curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId())); + } + case TxBundleImpl.DeletedEntry d -> { + curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId())); + } + default -> throw new IllegalStateException("Unexpected value: " + e); + } + } + _pendingWrites.set(curPw); ((TxBundleImpl) bundle).setReady(); _pendingWritesVersion.incrementAndGet(); if (_pendingBundles.peek() == bundle) From 02fd3e38e7e756e99b69df5d317c93aef36f3dde Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sun, 23 Feb 2025 21:16:22 +0100 Subject: [PATCH 099/105] additional allParallel test --- .../java/com/usatiuk/dhfs/objects/Just.java | 24 +++ .../com/usatiuk/dhfs/objects/ObjectsTest.java | 187 +++++++++++------- 2 files changed, 139 insertions(+), 72 deletions(-) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java index 2fa4e8fa..4c711c85 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -36,4 +36,28 @@ public abstract class Just { throw new RuntimeException(e); } } + + public static void runAll(Runnable... callables) { + try { + try (var exs = Executors.newFixedThreadPool(callables.length)) { + exs.invokeAll(Arrays.stream(callables).map(c -> (Callable) () -> { + try { + c.run(); + return null; + } catch (Exception e) { + throw new RuntimeException(e); + } + }).toList()).forEach(f -> { + try { + f.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 3fb5daf9..13a30c5d 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -131,7 +131,7 @@ public class ObjectsTest { @Test @Disabled - void createObjectConflict() throws InterruptedException { + void createObjectConflict() { AtomicBoolean thread1Failed = new AtomicBoolean(true); AtomicBoolean thread2Failed = new AtomicBoolean(true); @@ -179,7 +179,11 @@ public class ObjectsTest { } }); - latch.await(); + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } var got = txm.run(() -> { return curTx.get(Parent.class, new JObjectKey("Parent2")).orElse(null); @@ -197,7 +201,7 @@ public class ObjectsTest { @ParameterizedTest @EnumSource(LockingStrategy.class) - void editConflict(LockingStrategy strategy) throws InterruptedException { + void editConflict(LockingStrategy strategy) { String key = "Parent4" + strategy.name(); txm.run(() -> { var newParent = new Parent(JObjectKey.of(key), "John3"); @@ -247,7 +251,11 @@ public class ObjectsTest { } }); - latchEnd.await(); + try { + latchEnd.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } var got = txm.run(() -> { return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); @@ -383,7 +391,7 @@ public class ObjectsTest { } @RepeatedTest(100) - void snapshotTest3() throws InterruptedException { + 
void snapshotTest3() { var key = "SnapshotTest3"; var barrier0 = new CountDownLatch(1); var barrier1 = new CyclicBarrier(2); @@ -391,7 +399,11 @@ public class ObjectsTest { txm.run(() -> { curTx.put(new Parent(JObjectKey.of(key), "John")); }).onFlush(barrier0::countDown); - barrier0.await(); + try { + barrier0.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } try (ExecutorService ex = Executors.newFixedThreadPool(3)) { ex.invokeAll(List.of( () -> { @@ -444,7 +456,7 @@ public class ObjectsTest { } @RepeatedTest(100) - void simpleIterator1() throws Exception { + void simpleIterator1() { var key = "SimpleIterator1"; var key1 = key + "_1"; var key2 = key + "_2"; @@ -471,7 +483,7 @@ public class ObjectsTest { } @RepeatedTest(100) - void simpleIterator2() throws Exception { + void simpleIterator2() { var key = "SimpleIterator2"; var key1 = key + "_1"; var key2 = key + "_2"; @@ -485,15 +497,16 @@ public class ObjectsTest { curTx.put(new Parent(JObjectKey.of(key4), "John4")); }); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key2, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key3, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } }); txm.run(() -> { curTx.delete(new JObjectKey(key)); @@ -503,8 +516,9 @@ public class ObjectsTest { curTx.delete(new JObjectKey(key4)); }); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } }); } @@ -543,11 +557,12 @@ public class ObjectsTest { try { barrier.await(); barrier2.await(); - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } } catch (Exception e) { throw new RuntimeException(e); } @@ -557,15 +572,16 @@ public class ObjectsTest { }); Log.info("All threads finished"); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key2, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key3, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = 
iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } }); txm.run(() -> { curTx.delete(new JObjectKey(key)); @@ -575,8 +591,9 @@ public class ObjectsTest { curTx.delete(new JObjectKey(key4)); }); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } }); } @@ -616,14 +633,15 @@ public class ObjectsTest { try { barrier.await(); barrier2.await(); - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key2, got.getKey().name()); - Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } } catch (Exception e) { throw new RuntimeException(e); } @@ -633,16 +651,17 @@ public class ObjectsTest { }); Log.info("All threads finished"); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key2, got.getKey().name()); - Assertions.assertEquals("John5", ((Parent) got.getValue()).name()); - got = iter.next(); - Assertions.assertEquals(key3, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John5", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } }); txm.run(() -> { curTx.delete(new JObjectKey(key)); @@ -652,8 +671,9 @@ public class ObjectsTest { curTx.delete(new JObjectKey(key4)); }); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } }); } @@ -693,14 +713,15 @@ public class ObjectsTest { try { barrier.await(); barrier2.await(); - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key2, got.getKey().name()); - Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); - got = iter.next(); - Assertions.assertEquals(key4, 
got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } } catch (Exception e) { throw new RuntimeException(e); } @@ -710,13 +731,14 @@ public class ObjectsTest { }); Log.info("All threads finished"); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - var got = iter.next(); - Assertions.assertEquals(key1, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key3, got.getKey().name()); - got = iter.next(); - Assertions.assertEquals(key4, got.getKey().name()); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + var got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key3, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } }); txm.run(() -> { curTx.delete(new JObjectKey(key)); @@ -725,11 +747,32 @@ public class ObjectsTest { curTx.delete(new JObjectKey(key4)); }); txm.run(() -> { - var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key)); - Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { + Assertions.assertTrue(!iter.hasNext() || !iter.next().getKey().name().startsWith(key)); + } }); } + @RepeatedTest(100) + void allParallel() { + Just.runAll( + () -> createObject(), + () -> createGetObject(), + () -> createDeleteObject(), + () -> createCreateObject(), + () -> editConflict(LockingStrategy.WRITE), + () -> editConflict(LockingStrategy.OPTIMISTIC), + () -> snapshotTest1(), + () -> snapshotTest2(), + () -> snapshotTest3(), + () -> simpleIterator1(), + () -> simpleIterator2(), + () -> concurrentIterator1(), + () -> concurrentIterator2(), + () -> concurrentIterator3() + ); + } + // } // // @Test From 577e9dc1163160ab8aac8607bd1c44dc7c40c13b Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 25 Feb 2025 19:39:56 +0100 Subject: [PATCH 100/105] k-v based map draft, seems to work! 
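The core change: merging iterators are no longer built from ready-made source iterators. Each source is now passed as an IterProdFn factory of (start, key) -> iterator, because for LT/LE starts MergingKvIterator first has to open every source at the requested bound, peek each source's greatest key at or below it, pick the largest of those, and then re-open all sources at GE that key so everything is positioned consistently before merging begins.

A rough usage sketch (the keys and values here are made up, mirroring the style of the new testPriorityLe* tests below; ties on a key go to the first-listed source):

    var source1 = TreePMap.<Integer, Integer>empty().plus(1, 10).plus(3, 30);
    var source2 = TreePMap.<Integer, Integer>empty().plus(2, 20).plus(3, 31).plus(5, 50);
    // Each source is an IterProdFn factory, so MergingKvIterator can
    // re-request it with a rewritten (start, key) bound:
    var merged = new MergingKvIterator<>("example", IteratorStart.LE, 3,
            (s, k) -> new NavigableMapKvIterator<>(source1, s, k),
            (s, k) -> new NavigableMapKvIterator<>(source2, s, k));
    // LE probe: both sources report 3 as their greatest key <= 3, so all
    // sources are re-opened at GE 3; source2's entry for key 3 is shadowed
    // by source1's. Yields (3, 30), then (5, 50).
    while (merged.hasNext()) System.out.println(merged.next());

This is only a sketch of the intended behaviour; the testPriorityLe* cases in MergingKvIteratorTest are authoritative.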
--- .../dhfs/objects/CurrentTransaction.java | 2 +- .../com/usatiuk/dhfs/objects/IterProdFn.java | 8 + .../usatiuk/dhfs/objects/JObjectManager.java | 9 +- .../dhfs/objects/MergingKvIterator.java | 69 +++- .../dhfs/objects/NavigableMapKvIterator.java | 12 +- .../dhfs/objects/PredicateKvIterator.java | 15 +- .../objects/SelfRefreshingKvIterator.java | 4 +- .../usatiuk/dhfs/objects/SnapshotManager.java | 36 +- .../objects/TombstoneMergingKvIterator.java | 10 +- .../WritebackObjectPersistentStore.java | 26 +- .../CachingObjectPersistentStore.java | 9 +- .../LmdbObjectPersistentStore.java | 19 +- .../ReadTrackingObjectSourceFactory.java | 17 +- .../ReadTrackingTransactionObjectSource.java | 5 +- .../dhfs/objects/transaction/Transaction.java | 7 +- .../transaction/TransactionFactoryImpl.java | 22 +- .../dhfs/objects/MergingKvIteratorTest.java | 172 +++++++++- .../dhfs/objects/PredicateKvIteratorTest.java | 37 ++ .../com/usatiuk/dhfs/files/objects/File.java | 30 +- .../usatiuk/dhfs/files/objects/FsNode.java | 11 - .../files/service/DhfsFileServiceImpl.java | 315 +++++++++++------- .../dhfs/objects/RefcounterTxHook.java | 48 ++- .../dhfs/objects/RemoteObjectMeta.java | 2 +- .../usatiuk/dhfs/objects/jmap/JMapEntry.java | 12 + .../usatiuk/dhfs/objects/jmap/JMapHelper.java | 49 +++ .../usatiuk/dhfs/objects/jmap/JMapHolder.java | 6 + .../dhfs/objects/jmap/JMapIterator.java | 75 +++++ .../usatiuk/dhfs/objects/jmap/JMapKey.java | 4 + .../dhfs/objects/jmap/JMapLongKey.java | 24 ++ .../repository/PersistentRemoteHostsData.java | 8 + .../objects/repository/peersync/PeerInfo.java | 8 + .../files/DhfsFileServiceSimpleTestImpl.java | 26 +- 32 files changed, 859 insertions(+), 238 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java delete mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java create mode 100644 dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java index d3ef24ca..604a10f8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CurrentTransaction.java @@ -44,7 +44,7 @@ public class CurrentTransaction implements Transaction { } @Override - public Iterator> getIterator(IteratorStart start, JObjectKey key) { + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { return transactionManager.current().getIterator(start, key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java new file mode 100644 index 00000000..01798da9 --- /dev/null +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/IterProdFn.java
@@ -0,0 +1,8 @@
+package com.usatiuk.dhfs.objects;
+
+import com.usatiuk.dhfs.objects.persistence.IteratorStart;
+
+@FunctionalInterface
+public interface IterProdFn<K extends Comparable<K>, V> {
+    CloseableKvIterator<K, V> get(IteratorStart start, K key);
+}
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
index af500e3a..fdd3f421 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java
@@ -136,10 +136,6 @@ public class JObjectManager {
                 };
             }
 
-            Log.trace("Committing transaction start");
-            // FIXME: Better way?
-            addDependency.accept(JDataDummy.TX_ID_OBJ_NAME);
-            writes.put(JDataDummy.TX_ID_OBJ_NAME, new TxRecord.TxObjectRecordWrite<>(JDataDummy.getInstance()));
         } finally {
             readSet = tx.reads();
@@ -153,6 +149,11 @@ public class JObjectManager {
                 }
             }
         }
+
+        Log.trace("Committing transaction start");
+        // FIXME: Better way?
+        addDependency.accept(JDataDummy.TX_ID_OBJ_NAME);
+        writes.put(JDataDummy.TX_ID_OBJ_NAME, new TxRecord.TxObjectRecordWrite<>(JDataDummy.getInstance()));
         var snapshotId = tx.snapshot().id();
         var newId = _txCounter.get() + 1;
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java
index 72736b89..04a293bf 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java
@@ -1,5 +1,6 @@
 package com.usatiuk.dhfs.objects;
 
+import com.usatiuk.dhfs.objects.persistence.IteratorStart;
 import io.quarkus.logging.Log;
 import org.apache.commons.lang3.tuple.Pair;
 
@@ -7,30 +8,79 @@
 public class MergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
     private final Map<CloseableKvIterator<K, V>, Integer> _iterators;
-    private final SortedMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
+    private final NavigableMap<K, CloseableKvIterator<K, V>> _sortedIterators = new TreeMap<>();
     private final String _name;
 
-    public MergingKvIterator(String name, List<CloseableKvIterator<K, V>> iterators) {
+    public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
         _name = name;
+
+        IteratorStart initialStartType = startType;
+        K initialStartKey = startKey;
+        boolean fail = false;
+        if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
+            // Starting at the greatest key less than (or equal to) the start key:
+            // each of the source iterators has given us its "greatest LT/LE key",
+            // now we need to pick the greatest of those to start with
+            var initialIterators = iterators.stream().map(p -> p.get(initialStartType, initialStartKey)).toList();
+            try {
+                K initialMaxValue = initialIterators.stream()
+                        .filter(CloseableKvIterator::hasNext)
+                        .map((i) -> {
+                            var peeked = i.peekNextKey();
+//                            Log.warnv("peeked: {0}, from {1}", peeked, i.getClass());
+                            return peeked;
+                        })
+                        .max(Comparator.naturalOrder()).orElse(null);
+                if (initialMaxValue == null) {
+                    fail = true;
+                }
+                startKey = initialMaxValue;
+                startType = IteratorStart.GE;
+            } finally {
+                initialIterators.forEach(CloseableKvIterator::close);
+            }
+        }
+
+        if (fail) {
+            _iterators = Map.of();
+            return;
+        }
+
         int counter = 0;
         var iteratorsTmp = new HashMap<CloseableKvIterator<K, V>, Integer>();
-        for (CloseableKvIterator<K, V> iterator : iterators) {
+        for (var iteratorFn : iterators) {
+            var iterator = iteratorFn.get(startType, startKey);
             iteratorsTmp.put(iterator, counter++);
         }
-        _iterators = Collections.unmodifiableMap(iteratorsTmp);
+        _iterators = Map.copyOf(iteratorsTmp);
 
-        for (CloseableKvIterator<K, V> iterator : iterators) {
+        for (CloseableKvIterator<K, V> iterator : _iterators.keySet()) {
             advanceIterator(iterator);
         }
 
         Log.tracev("{0} Created: {1}", _name, _sortedIterators);
+        switch (initialStartType) {
+            case LT -> {
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
+            }
+            case LE -> {
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
+            }
+            case GT -> {
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) > 0;
+            }
+            case GE -> {
+                assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) >= 0;
+            }
+        }
     }
 
     @SafeVarargs
-    public MergingKvIterator(String name, CloseableKvIterator<K, V>... iterators) {
-        this(name, List.of(iterators));
+    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
+        this(name, startType, startKey, List.of(iterators));
     }
 
+
     private void advanceIterator(CloseableKvIterator<K, V> iterator) {
         if (!iterator.hasNext()) {
             return;
@@ -49,6 +99,7 @@
             _sortedIterators.put(key, iterator);
             advanceIterator(them);
         } else {
+            Log.tracev("{0} Skipped: {1}", _name, iterator.peekNextKey());
             iterator.skip();
             advanceIterator(iterator);
         }
@@ -92,7 +143,7 @@
         }
         var curVal = cur.getValue().next();
         advanceIterator(cur.getValue());
-        Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators);
+//        Log.tracev("{0} Read from {1}: {2}, next: {3}", _name, cur.getValue(), curVal, _sortedIterators.keySet());
         return curVal;
     }
 
@@ -100,7 +151,7 @@
     public String toString() {
         return "MergingKvIterator{" +
                 "_name='" + _name + '\'' +
-                ", _sortedIterators=" + _sortedIterators +
+                ", _sortedIterators=" + _sortedIterators.keySet() +
                 ", _iterators=" + _iterators +
                 '}';
     }
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java
index f14c2997..6b5a6ccf 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java
@@ -15,11 +15,15 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> implements Close
             case GE -> _view = map.tailMap(key, true);
             case GT -> _view = map.tailMap(key, false);
             case LE -> {
-                var tail = map.tailMap(key, true);
-                if (tail.firstKey().equals(key)) _view = tail;
-                else _view = map.tailMap(map.lowerKey(key), true);
+                var floorKey = map.floorKey(key);
+                if (floorKey == null) _view = Collections.emptyNavigableMap();
+                else _view = map.tailMap(floorKey, true);
+            }
+            case LT -> {
+                var lowerKey = map.lowerKey(key);
+                if (lowerKey == null) _view = Collections.emptyNavigableMap();
+                else _view = map.tailMap(lowerKey, true);
             }
-            case LT -> _view = map.tailMap(map.lowerKey(key), true);
             default -> throw new IllegalArgumentException("Unknown start type");
         }
         _iterator = _view.entrySet().iterator();
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java
index b0be5636..24a72b89 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java
@@ -1,5 +1,6 @@
 package com.usatiuk.dhfs.objects;
 
+import com.usatiuk.dhfs.objects.persistence.IteratorStart;
 import org.apache.commons.lang3.tuple.Pair;
 
 import java.util.NoSuchElementException;
@@ -10,10 +11,22 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> implements Clo
     private final Function<V, V_T> _transformer;
     private Pair<K, V_T> _next;
 
-    public PredicateKvIterator(CloseableKvIterator<K, V> backing, Function<V, V_T> transformer) {
+    public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
         _backing = backing;
         _transformer = transformer;
         fillNext();
+        if (_next == null) {
+            return;
+        }
+        if (start == IteratorStart.LE) {
+            if (_next.getKey().compareTo(startKey) > 0) {
+                _next = null;
+            }
+        } else if (start == IteratorStart.LT) {
+            if (_next.getKey().compareTo(startKey) >= 0) {
+                _next = null;
+            }
+        }
     }
 
     private void fillNext() {
diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java
index f1d851d9..1ce8dd05 100644
--- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java
+++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java
@@ -42,13 +42,15 @@ public class SelfRefreshingKvIterator<K extends Comparable<K>, V> implements Clo
             if (_versionSupplier.get() == _curVersion) {
                 return;
             }
+            Log.tracev("Refreshing iterator last refreshed {0}, current version {1}, current value {2}",
+                    _curVersion, _versionSupplier.get(), _next);
             long newVersion = _versionSupplier.get();
             oldBacking = _backing;
             _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _next.getKey()));
             var next = _backing.hasNext() ?
_backing.next() : null; if (next == null) { Log.errorv("Failed to refresh iterator, null last refreshed {0}," + - " current version {1}, current value {2}", _curVersion, newVersion, next); + " current version {1}, current value {2}, read value {3}", _curVersion, newVersion, _next, next); assert false; } else if (!next.equals(_next)) { Log.errorv("Failed to refresh iterator, mismatch last refreshed {0}," + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 791b1221..9ebf8949 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -240,9 +240,21 @@ public class SnapshotManager { private final CloseableKvIterator _backing; private Pair> _next; - public SnapshotKvIterator(IteratorStart start, JObjectKey key) { - _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(key, 0L)); + public SnapshotKvIterator(IteratorStart start, JObjectKey startKey) { + _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, 0L)); fillNext(); + if (_next == null) { + return; + } + if (start == IteratorStart.LE) { + if (_next.getKey().compareTo(startKey) > 0) { + _next = null; + } + } else if (start == IteratorStart.LT) { + if (_next.getKey().compareTo(startKey) >= 0) { + _next = null; + } + } } private void fillNext() { @@ -357,11 +369,21 @@ public class SnapshotManager { // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will // be served instead. Note that refreshing the iterator will also refresh the writeback iterator, // so it also should be consistent. - return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>((params) -> - new TombstoneMergingKvIterator<>("snapshot", new SnapshotKvIterator(params.getLeft(), params.getRight()), - new MappingKvIterator<>(writebackStore.getIterator(params.getLeft(), params.getRight()), d -> - d.version() <= _id ? new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>() - )), _snapshotVersion::get, _lock.readLock(), start, key)); + Log.tracev("Getting snapshot {0} iterator for {1} {2}", _id, start, key); + _lock.readLock().lock(); + try { + return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>( + p -> + new TombstoneMergingKvIterator<>("snapshot", p.getKey(), p.getValue(), + SnapshotKvIterator::new, + (tS, tK) -> new MappingKvIterator<>( + writebackStore.getIterator(tS, tK), + d -> d.version() <= _id ? 
new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>()) + ) + , _snapshotVersion::get, _lock.readLock(), start, key)); + } finally { + _lock.readLock().unlock(); + } } public CloseableKvIterator getIterator(JObjectKey key) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java index bcbbd681..d84bdd79 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; @@ -9,10 +10,11 @@ public class TombstoneMergingKvIterator, V> implements C private final CloseableKvIterator _backing; private final String _name; - public TombstoneMergingKvIterator(String name, List>> iterators) { + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List>> iterators) { _name = name; _backing = new PredicateKvIterator<>( - new MergingKvIterator<>(name + "-merging", iterators), + new MergingKvIterator<>(name + "-merging", startType, startKey, iterators), + startType, startKey, pair -> { Log.tracev("{0} - Processing pair {1}", _name, pair); if (pair instanceof Tombstone) { @@ -23,8 +25,8 @@ public class TombstoneMergingKvIterator, V> implements C } @SafeVarargs - public TombstoneMergingKvIterator(String name, CloseableKvIterator>... iterators) { - this(name, List.of(iterators)); + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn>... 
iterators) { + this(name, startType, startKey, List.of(iterators)); } public interface DataType { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 4597544d..173689b0 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -447,22 +447,26 @@ public class WritebackObjectPersistentStore { // Does not have to guarantee consistent view, snapshots are handled by upper layers // Invalidated by commitBundle, but might return data after it has been really committed public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + Log.tracev("Getting writeback iterator: {0}, {1}", start, key); _pendingWritesVersionLock.readLock().lock(); try { - CloseableKvIterator> oursIterator = new MappingKvIterator<>( - new NavigableMapKvIterator<>(_pendingWrites.get(), start, key), - e -> switch (e) { - case PendingWrite p -> new TombstoneMergingKvIterator.Data<>(p.data()); - case PendingDelete d -> new TombstoneMergingKvIterator.Tombstone<>(); - default -> throw new IllegalStateException("Unexpected value: " + e); - }); + var curPending = _pendingWrites.get(); return new InvalidatableKvIterator<>( new InconsistentKvIteratorWrapper<>( - (p) -> - new TombstoneMergingKvIterator<>("writeback-ps", - oursIterator, - new MappingKvIterator<>(cachedStore.getIterator(p.getLeft(), p.getRight()), TombstoneMergingKvIterator.Data::new)), start, key), + p -> + new TombstoneMergingKvIterator<>("writeback-ps", p.getLeft(), p.getRight(), + (tS, tK) -> new MappingKvIterator<>( + new NavigableMapKvIterator<>(curPending, tS, tK), + e -> switch (e) { + case PendingWrite pw -> + new TombstoneMergingKvIterator.Data<>(pw.data()); + case PendingDelete d -> + new TombstoneMergingKvIterator.Tombstone<>(); + default -> + throw new IllegalStateException("Unexpected value: " + e); + }), + (tS, tK) -> new MappingKvIterator<>(cachedStore.getIterator(tS, tK), TombstoneMergingKvIterator.Data::new)), start, key), _pendingWritesVersion::get, _pendingWritesVersionLock.readLock()); } finally { _pendingWritesVersionLock.readLock().unlock(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 04a40286..f39a9dd1 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -188,14 +188,15 @@ public class CachingObjectPersistentStore { _cacheVersionLock.readLock().lock(); try { return new InconsistentSelfRefreshingKvIterator<>( - (bp) -> new MergingKvIterator<>("cache", - new PredicateKvIterator<>( - new NavigableMapKvIterator<>(_sortedCache, bp.getLeft(), bp.getRight()), + p -> new MergingKvIterator<>("cache", p.getLeft(), p.getRight(), + (mS, mK) -> new PredicateKvIterator<>( + new NavigableMapKvIterator<>(_sortedCache, mS, mK), + mS, mK, e -> { Log.tracev("Taken from cache: {0}", e); return e.object().orElse(null); } - ), new CachingKvIterator(delegate.getIterator(bp.getLeft(), bp.getRight()))), _cacheVersion::get, + ), (mS, mK) -> new CachingKvIterator(delegate.getIterator(mS, 
mK))), _cacheVersion::get, _cacheVersionLock.readLock(), start, key); } finally { _cacheVersionLock.readLock().unlock(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index b8f7f270..bb1254f8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -142,7 +142,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { } } - Log.tracev("got: {0}, hasNext: {1}", got, _hasNext); + var realGot = JObjectKey.fromByteBuffer(_cursor.key()); + _cursor.key().flip(); + + switch (start) { + case LT -> { + assert !_hasNext || realGot.compareTo(key) < 0; + } + case LE -> { + assert !_hasNext || realGot.compareTo(key) <= 0; + } + case GT -> { + assert !_hasNext || realGot.compareTo(key) > 0; + } + case GE -> { + assert !_hasNext || realGot.compareTo(key) >= 0; + } + } + Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext); } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java index a163bac6..17881c03 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -2,12 +2,14 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.persistence.IteratorStart; -import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; -import java.util.*; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; @ApplicationScoped public class ReadTrackingObjectSourceFactory { @@ -22,7 +24,6 @@ public class ReadTrackingObjectSourceFactory { private final SnapshotManager.Snapshot _snapshot; private final Map> _readSet = new HashMap<>(); - private final Queue _iterators = new ArrayDeque<>(); public ReadTrackingObjectSourceImpl(SnapshotManager.Snapshot snapshot) { _snapshot = snapshot; @@ -66,9 +67,9 @@ public class ReadTrackingObjectSourceFactory { @Override public void close() { - for (var it : _iterators) { - it.close(); - } +// for (var it : _iterators) { +// it.close(); +// } } private class ReadTrackingIterator implements CloseableKvIterator { @@ -108,9 +109,7 @@ public class ReadTrackingObjectSourceFactory { @Override public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - var got = new ReadTrackingIterator(start, key); - _iterators.add(got); - return got; + return new ReadTrackingIterator(start, key); } } } \ No newline at end of file diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java index 14ee4c3a..171ea1d4 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java +++ 
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingTransactionObjectSource.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.persistence.IteratorStart; @@ -15,9 +16,9 @@ public interface ReadTrackingTransactionObjectSource extends AutoCloseableNoThro Optional getWriteLocked(Class type, JObjectKey key); - Iterator> getIterator(IteratorStart start, JObjectKey key); + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); - default Iterator> getIterator(JObjectKey key) { + default CloseableKvIterator getIterator(JObjectKey key) { return getIterator(IteratorStart.GE, key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java index 0d295511..3120999b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/Transaction.java @@ -1,13 +1,12 @@ package com.usatiuk.dhfs.objects.transaction; +import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; import com.usatiuk.dhfs.objects.persistence.IteratorStart; -import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.Collection; -import java.util.Iterator; import java.util.Optional; // The transaction interface actually used by user code to retrieve objects @@ -27,9 +26,9 @@ public interface Transaction extends TransactionHandle { return get(type, key, LockingStrategy.OPTIMISTIC); } - Iterator> getIterator(IteratorStart start, JObjectKey key); + CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); - default Iterator> getIterator(JObjectKey key) { + default CloseableKvIterator getIterator(JObjectKey key) { return getIterator(IteratorStart.GE, key); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 85025fc6..58939457 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -1,13 +1,10 @@ package com.usatiuk.dhfs.objects.transaction; -import com.usatiuk.dhfs.objects.JData; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.SnapshotManager; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.persistence.IteratorStart; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; -import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.*; @@ -27,7 +24,10 @@ public class TransactionFactoryImpl implements TransactionFactory { private class TransactionImpl implements TransactionPrivate { private final ReadTrackingTransactionObjectSource _source; - private final Map> _writes = new HashMap<>(); + + private final NavigableMap> _writes = new TreeMap<>(); + private long _writeVersion = 0; + private Map> _newWrites = new HashMap<>(); private final List _onCommit = new ArrayList<>(); private 
final List _onFlush = new ArrayList<>(); @@ -103,8 +103,16 @@ public class TransactionFactoryImpl implements TransactionFactory { } @Override - public Iterator> getIterator(IteratorStart start, JObjectKey key) { - return _source.getIterator(start, key); + public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { + Log.tracev("Getting tx iterator with start={0}, key={1}", start, key); + return new TombstoneMergingKvIterator<>("tx", start, key, + (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK), t -> switch (t) { + case TxRecord.TxObjectRecordWrite write -> + new TombstoneMergingKvIterator.Data<>(write.data()); + case TxRecord.TxObjectRecordDeleted deleted -> new TombstoneMergingKvIterator.Tombstone<>(); + case null, default -> null; + }), + (tS, tK) -> new MappingKvIterator<>(_source.getIterator(tS, tK), TombstoneMergingKvIterator.Data::new)); } @Override diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java index 45c73eea..89ebbcf1 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -1,8 +1,10 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import org.apache.commons.lang3.tuple.Pair; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; import java.util.Iterator; import java.util.List; @@ -83,7 +85,7 @@ public class MergingKvIteratorTest { public void testSimple() { var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator(); var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator(); - var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1), new SimpleIteratorWrapper<>(source2)); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1), (a, b) -> new SimpleIteratorWrapper<>(source2)); var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7)); for (var pair : expected) { Assertions.assertTrue(mergingIterator.hasNext()); @@ -95,7 +97,7 @@ public class MergingKvIteratorTest { public void testPriority() { var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); - var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator())); var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6)); for (var pair : expected) { Assertions.assertTrue(mergingIterator.hasNext()); @@ -103,7 +105,7 @@ public class MergingKvIteratorTest { } Assertions.assertFalse(mergingIterator.hasNext()); - var mergingIterator2 = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new 
SimpleIteratorWrapper<>(source1.iterator())); var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7)); for (var pair : expected2) { Assertions.assertTrue(mergingIterator2.hasNext()); @@ -116,7 +118,7 @@ public class MergingKvIteratorTest { public void testPriority2() { var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6)); var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5)); - var mergingIterator = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source1.iterator()), new SimpleIteratorWrapper<>(source2.iterator())); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator())); var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6)); for (var pair : expected) { Assertions.assertTrue(mergingIterator.hasNext()); @@ -124,7 +126,7 @@ public class MergingKvIteratorTest { } Assertions.assertFalse(mergingIterator.hasNext()); - var mergingIterator2 = new MergingKvIterator<>("test", new SimpleIteratorWrapper<>(source2.iterator()), new SimpleIteratorWrapper<>(source1.iterator())); + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator())); var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6)); for (var pair : expected2) { Assertions.assertTrue(mergingIterator2.hasNext()); @@ -132,4 +134,164 @@ public class MergingKvIteratorTest { } Assertions.assertFalse(mergingIterator2.hasNext()); } + + @Test + public void testPriorityLe() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe2() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + } + + @Test + public void testPriorityLe3() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(6, 8); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), 
(mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(5, 6), Pair.of(6, 8)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe4() { + var source1 = TreePMap.empty().plus(6, 7); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe5() { + var source1 = TreePMap.empty().plus(1, 2).plus(6, 7); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe6() { + var source1 = TreePMap.empty().plus(1, 3).plus(2, 5).plus(3, 4); + var source2 = TreePMap.empty().plus(4, 6); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new 
MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(4, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLe7() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); + var source2 = TreePMap.empty().plus(1, 4).plus(3, 5).plus(4, 6); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } + + @Test + public void testPriorityLt() { + var source1 = TreePMap.empty().plus(2, 4).plus(5, 6); + var source2 = TreePMap.empty().plus(1, 3).plus(2, 5); + var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + var expected = List.of(Pair.of(2, 4), Pair.of(5, 6)); + for (var pair : expected) { + Assertions.assertTrue(mergingIterator.hasNext()); + Assertions.assertEquals(pair, mergingIterator.next()); + } + Assertions.assertFalse(mergingIterator.hasNext()); + + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); + var expected2 = List.of(Pair.of(2, 5), Pair.of(5, 6)); + for (var pair : expected2) { + Assertions.assertTrue(mergingIterator2.hasNext()); + Assertions.assertEquals(pair, mergingIterator2.next()); + } + Assertions.assertFalse(mergingIterator2.hasNext()); + } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java new file mode 100644 index 00000000..44c1daa0 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java @@ -0,0 +1,37 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.List; + +public class PredicateKvIteratorTest { + + @Test + public void simpleTest() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 1), + IteratorStart.GE, 1, v -> (v % 2 == 0) ? 
v : null); + var expected = List.of(Pair.of(4, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + } + + @Test + public void ltTest() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + var expected = List.of(); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + Assertions.assertFalse(pit.hasNext()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java index 37aaf5a5..d6a4084c 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/File.java @@ -1,47 +1,45 @@ package com.usatiuk.dhfs.files.objects; -import com.usatiuk.autoprotomap.runtime.ProtoMirror; +import com.usatiuk.dhfs.objects.JDataRemote; import com.usatiuk.dhfs.objects.JObjectKey; -import org.pcollections.TreePMap; +import com.usatiuk.dhfs.objects.jmap.JMapHolder; +import com.usatiuk.dhfs.objects.jmap.JMapLongKey; import java.util.Collection; import java.util.Set; -//@ProtoMirror(ChunkDataP.class) public record File(JObjectKey key, long mode, long cTime, long mTime, - TreePMap chunks, boolean symlink, long size -) implements FsNode { - public File withChunks(TreePMap chunks) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); - } - + boolean symlink, long size +) implements JDataRemote, JMapHolder { public File withSymlink(boolean symlink) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, symlink, size); } public File withSize(long size) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, symlink, size); } public File withMode(long mode) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, symlink, size); } public File withCTime(long cTime) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, symlink, size); } public File withMTime(long mTime) { - return new File(key, mode, cTime, mTime, chunks, symlink, size); + return new File(key, mode, cTime, mTime, symlink, size); } @Override public Collection collectRefsTo() { - return Set.copyOf(chunks().values()); + return Set.of(); +// return Set.copyOf(chunks().values()); } @Override public int estimateSize() { - return chunks.size() * 64; + return 64; +// return chunks.size() * 64; } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java deleted file mode 100644 index a359d2b7..00000000 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.dhfs.objects.JDataRemote; - -public interface FsNode extends JDataRemote { - long mode(); - - long cTime(); - - long mTime(); -} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java index 9172ee1c..0bcfbb90 
100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java @@ -10,6 +10,10 @@ import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; +import com.usatiuk.dhfs.objects.jmap.JMapEntry; +import com.usatiuk.dhfs.objects.jmap.JMapHelper; +import com.usatiuk.dhfs.objects.jmap.JMapLongKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; @@ -23,7 +27,6 @@ import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.pcollections.TreePMap; import java.nio.charset.StandardCharsets; import java.nio.file.Path; @@ -69,6 +72,9 @@ public class DhfsFileServiceImpl implements DhfsFileService { @Inject JKleppmannTreeManager jKleppmannTreeManager; + @Inject + JMapHelper jMapHelper; + private JKleppmannTreeManager.JKleppmannTree getTree() { return jKleppmannTreeManager.getTree(new JObjectKey("fs")); } @@ -156,7 +162,7 @@ public class DhfsFileServiceImpl implements DhfsFileService { var fuuid = UUID.randomUUID(); Log.debug("Creating file " + fuuid); - File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty(), false, 0); + File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false, 0); remoteTx.putData(f); try { @@ -270,31 +276,27 @@ public class DhfsFileServiceImpl implements DhfsFileService { return Optional.empty(); } - try { - var chunksAll = file.chunks(); - if (chunksAll.isEmpty()) { + try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { + if (!it.hasNext()) return Optional.of(ByteString.empty()); - } - var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); - if (chunksList.isEmpty()) { - return Optional.of(ByteString.empty()); - } - - var chunks = chunksList.iterator(); +// if (it.peekNextKey().key() != offset) { +// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey()); +// return Optional.of(ByteString.empty()); +// } + long curPos = offset; ByteString buf = ByteString.empty(); - long curPos = offset; - var chunk = chunks.next(); + var chunk = it.next(); while (curPos < offset + length) { - var chunkPos = chunk.getKey(); + var chunkPos = chunk.getKey().key(); long offInChunk = curPos - chunkPos; long toReadInChunk = (offset + length) - curPos; - var chunkBytes = readChunk(chunk.getValue()); + var chunkBytes = readChunk(chunk.getValue().ref()); long readableLen = chunkBytes.size() - offInChunk; @@ -309,12 +311,11 @@ public class DhfsFileServiceImpl implements DhfsFileService { if (readableLen > toReadInChunk) break; - if (!chunks.hasNext()) break; + if (!it.hasNext()) break; - chunk = chunks.next(); + chunk = it.next(); } - // FIXME: return Optional.of(buf); } catch (Exception e) { Log.error("Error reading file: " + fileUuid, e); @@ -379,41 
+380,68 @@ public class DhfsFileServiceImpl implements DhfsFileService { file = remoteTx.getData(File.class, fileUuid).orElse(null); } - var chunksAll = file.chunks(); - var first = chunksAll.floorEntry(offset); - var last = chunksAll.lowerEntry(offset + data.size()); + Pair> first; + Pair> last; + Log.tracev("Getting last"); + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(offset + data.size()))) { + last = it.hasNext() ? it.next() : null; + Log.tracev("Last: {0}", last); + } + NavigableMap removedChunks = new TreeMap<>(); long start = 0; - NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); - NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); - - if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { - beforeFirst = chunksAll; - afterLast = Collections.emptyNavigableMap(); - first = null; - last = null; - start = offset; - } else if (!chunksAll.isEmpty()) { - var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); - removedChunks.putAll(between); - start = first.getKey(); + try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { + first = it.hasNext() ? it.next() : null; + Log.tracev("First: {0}", first); + boolean empty = last == null; + if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) { + first = null; + last = null; + start = offset; + } else if (!empty) { + assert first != null; + removedChunks.put(first.getKey().key(), first.getValue().ref()); + while (it.hasNext() && it.peekNextKey().compareTo(last.getKey()) <= 0) { + var next = it.next(); + Log.tracev("Next: {0}", next); + removedChunks.put(next.getKey().key(), next.getValue().ref()); + } + removedChunks.put(last.getKey().key(), last.getValue().ref()); + start = first.getKey().key(); + } } + +// NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); +// NavigableMap afterLast = last != null ? 
chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); + +// if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { +// beforeFirst = chunksAll; +// afterLast = Collections.emptyNavigableMap(); +// first = null; +// last = null; +// start = offset; +// } else if (!chunksAll.isEmpty()) { +// var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); +// removedChunks.putAll(between); +// start = first.getKey(); +// } + ByteString pendingWrites = ByteString.empty(); - if (first != null && first.getKey() < offset) { - var chunkBytes = readChunk(first.getValue()); - pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); + if (first != null && first.getKey().key() < offset) { + var chunkBytes = readChunk(first.getValue().ref()); + pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey().key()))); } pendingWrites = pendingWrites.concat(data); if (last != null) { - var lchunkBytes = readChunk(last.getValue()); - if (last.getKey() + lchunkBytes.size() > offset + data.size()) { + var lchunkBytes = readChunk(last.getValue().ref()); + if (last.getKey().key() + lchunkBytes.size() > offset + data.size()) { var startInFile = offset + data.size(); - var startInChunk = startInFile - last.getKey(); + var startInChunk = startInFile - last.getKey().key(); pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); } } @@ -421,57 +449,57 @@ public class DhfsFileServiceImpl implements DhfsFileService { int combinedSize = pendingWrites.size(); if (targetChunkSize > 0) { - if (combinedSize < (targetChunkSize * writeMergeThreshold)) { - boolean leftDone = false; - boolean rightDone = false; - while (!leftDone && !rightDone) { - if (beforeFirst.isEmpty()) leftDone = true; - if (!beforeFirst.isEmpty() || !leftDone) { - var takeLeft = beforeFirst.lastEntry(); - - var cuuid = takeLeft.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - leftDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - leftDone = true; - continue; - } - - // FIXME: (and test this) - beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); - start = takeLeft.getKey(); - pendingWrites = readChunk(cuuid).concat(pendingWrites); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); - } - if (afterLast.isEmpty()) rightDone = true; - if (!afterLast.isEmpty() && !rightDone) { - var takeRight = afterLast.firstEntry(); - - var cuuid = takeRight.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - rightDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - rightDone = true; - continue; - } - - // FIXME: (and test this) - afterLast = afterLast.tailMap(takeRight.getKey(), false); - pendingWrites = pendingWrites.concat(readChunk(cuuid)); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeRight.getKey(), takeRight.getValue()); - } - } - } +// if (combinedSize < (targetChunkSize * writeMergeThreshold)) { +// boolean leftDone = false; +// boolean rightDone = false; +// while (!leftDone && !rightDone) { +// if (beforeFirst.isEmpty()) leftDone = true; +// if (!beforeFirst.isEmpty() || !leftDone) { +// var takeLeft = beforeFirst.lastEntry(); +// +// var cuuid = takeLeft.getValue(); +// +// if (getChunkSize(cuuid) >= 
(targetChunkSize * writeMergeMaxChunkToTake)) { +// leftDone = true; +// continue; +// } +// +// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { +// leftDone = true; +// continue; +// } +// +// // FIXME: (and test this) +// beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false); +// start = takeLeft.getKey(); +// pendingWrites = readChunk(cuuid).concat(pendingWrites); +// combinedSize += getChunkSize(cuuid); +// removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); +// } +// if (afterLast.isEmpty()) rightDone = true; +// if (!afterLast.isEmpty() && !rightDone) { +// var takeRight = afterLast.firstEntry(); +// +// var cuuid = takeRight.getValue(); +// +// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { +// rightDone = true; +// continue; +// } +// +// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { +// rightDone = true; +// continue; +// } +// +// // FIXME: (and test this) +// afterLast = afterLast.tailMap(takeRight.getKey(), false); +// pendingWrites = pendingWrites.concat(readChunk(cuuid)); +// combinedSize += getChunkSize(cuuid); +// removedChunks.put(takeRight.getKey(), takeRight.getValue()); +// } +// } +// } } NavigableMap newChunks = new TreeMap<>(); @@ -501,7 +529,16 @@ public class DhfsFileServiceImpl implements DhfsFileService { } } - file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); + for (var e : removedChunks.entrySet()) { + Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.delete(file, JMapLongKey.of(e.getKey())); + } + + for (var e : newChunks.entrySet()) { + Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue()); + } + remoteTx.putData(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -523,11 +560,17 @@ public class DhfsFileServiceImpl implements DhfsFileService { } if (length == 0) { - var oldChunks = file.chunks(); - - file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); + try (var it = jMapHelper.getIterator(file, IteratorStart.GE, JMapLongKey.of(0))) { + while (it.hasNext()) { + var next = it.next(); + jMapHelper.delete(file, next.getKey()); + } + } +// var oldChunks = file.chunks(); +// +// file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis()); remoteTx.putData(file); - cleanupChunks(file, oldChunks.values()); +// cleanupChunks(file, oldChunks.values()); updateFileSize(file); return true; } @@ -535,7 +578,6 @@ public class DhfsFileServiceImpl implements DhfsFileService { var curSize = size(fileUuid); if (curSize == length) return true; - var chunksAll = file.chunks(); NavigableMap removedChunks = new TreeMap<>(); NavigableMap newChunks = new TreeMap<>(); @@ -573,20 +615,64 @@ public class DhfsFileServiceImpl implements DhfsFileService { } } } else { - var tail = chunksAll.lowerEntry(length); - var afterTail = chunksAll.tailMap(tail.getKey(), false); +// Pair> first; + Pair> last; + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(length))) { + last = it.hasNext() ? 
it.next() : null; + while (it.hasNext()) { + var next = it.next(); + removedChunks.put(next.getKey().key(), next.getValue().ref()); + } + } + removedChunks.put(last.getKey().key(), last.getValue().ref()); +// +// NavigableMap removedChunks = new TreeMap<>(); +// +// long start = 0; +// +// try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) { +// first = it.hasNext() ? it.next() : null; +// boolean empty = last == null; +// if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) { +// first = null; +// last = null; +// start = offset; +// } else if (!empty) { +// assert first != null; +// removedChunks.put(first.getKey().key(), first.getValue().ref()); +// while (it.hasNext() && it.peekNextKey() != last.getKey()) { +// var next = it.next(); +// removedChunks.put(next.getKey().key(), next.getValue().ref()); +// } +// removedChunks.put(last.getKey().key(), last.getValue().ref()); +// } +// } +// +// var tail = chunksAll.lowerEntry(length); +// var afterTail = chunksAll.tailMap(tail.getKey(), false); +// +// removedChunks.put(tail.getKey(), tail.getValue()); +// removedChunks.putAll(afterTail); - removedChunks.put(tail.getKey(), tail.getValue()); - removedChunks.putAll(afterTail); - - var tailBytes = readChunk(tail.getValue()); - var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); + var tailBytes = readChunk(last.getValue().ref()); + var newChunk = tailBytes.substring(0, (int) (length - last.getKey().key())); ChunkData newChunkData = createChunk(newChunk); - newChunks.put(tail.getKey(), newChunkData.key()); + newChunks.put(last.getKey().key(), newChunkData.key()); + } + +// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); + + for (var e : removedChunks.entrySet()) { + Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.delete(file, JMapLongKey.of(e.getKey())); + } + + for (var e : newChunks.entrySet()) { + Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue()); + jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue()); } - file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis()); remoteTx.putData(file); cleanupChunks(file, removedChunks.values()); updateFileSize(file); @@ -623,7 +709,8 @@ public class DhfsFileServiceImpl implements DhfsFileService { Log.debug("Creating file " + fuuid); ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); - File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), TreePMap.empty().plus(0L, newChunkData.key()), true, 0); + File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true, 0); + jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key()); updateFileSize(f); @@ -650,10 +737,14 @@ public class DhfsFileServiceImpl implements DhfsFileService { jObjectTxManager.executeTx(() -> { long realSize = 0; - if (!file.chunks().isEmpty()) { - var last = file.chunks().lastEntry(); - var lastSize = getChunkSize(last.getValue()); - realSize = last.getKey() + lastSize; + Pair> last; + Log.tracev("Getting last"); + try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) { + last = it.hasNext() ? 
it.next() : null; + } + + if (last != null) { + realSize = last.getKey().key() + getChunkSize(last.getValue().ref()); } if (realSize != file.size()) { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java index 8978c728..b6f515cd 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RefcounterTxHook.java @@ -1,6 +1,6 @@ package com.usatiuk.dhfs.objects; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; +import com.usatiuk.dhfs.objects.jmap.JMapEntry; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; @@ -11,24 +11,30 @@ public class RefcounterTxHook implements PreCommitTxHook { @Inject Transaction curTx; - private JDataRefcounted getRef(JDataRefcounted cur, JObjectKey key) { + private JDataRefcounted getRef(JObjectKey key) { var found = curTx.get(JDataRefcounted.class, key).orElse(null); if (found != null) { return found; } - if (cur instanceof RemoteObjectDataWrapper || cur instanceof JKleppmannTreeNode) { - // FIXME: - return new RemoteObjectMeta(key); - } else { - return found; - } - + return new RemoteObjectMeta(key); } @Override public void onChange(JObjectKey key, JData old, JData cur) { + if (cur instanceof JMapEntry me) { + var oldMe = (JMapEntry) old; + var oldRef = oldMe.ref(); + var curRef = me.ref(); + var referencedOld = getRef(oldRef); + curTx.put(referencedOld.withRefsFrom(referencedOld.refsFrom().minus(key))); + var referencedCur = getRef(curRef); + curTx.put(referencedCur.withRefsFrom(referencedCur.refsFrom().plus(key))); + Log.tracev("Removed ref from {0} to {1}, added ref to {2}", key, oldRef, curRef); + return; + } + if (!(cur instanceof JDataRefcounted refCur)) { return; } @@ -39,7 +45,7 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var curRef : curRefs) { if (!oldRefs.contains(curRef)) { - var referenced = getRef(refCur, curRef); + var referenced = getRef(curRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); Log.tracev("Added ref from {0} to {1}", key, curRef); } @@ -47,7 +53,7 @@ public class RefcounterTxHook implements PreCommitTxHook { for (var oldRef : oldRefs) { if (!curRefs.contains(oldRef)) { - var referenced = getRef(refCur, oldRef); + var referenced = getRef(oldRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); Log.tracev("Removed ref from {0} to {1}", key, oldRef); } @@ -56,12 +62,20 @@ public class RefcounterTxHook implements PreCommitTxHook { @Override public void onCreate(JObjectKey key, JData cur) { + if (cur instanceof JMapEntry me) { + var curRef = me.ref(); + var referencedCur = getRef(curRef); + curTx.put(referencedCur.withRefsFrom(referencedCur.refsFrom().plus(key))); + Log.tracev("Added ref from {0} to {1}", key, curRef); + return; + } + if (!(cur instanceof JDataRefcounted refCur)) { return; } for (var newRef : refCur.collectRefsTo()) { - var referenced = getRef(refCur, newRef); + var referenced = getRef(newRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().plus(key))); Log.tracev("Added ref from {0} to {1}", key, newRef); } @@ -69,12 +83,20 @@ public class RefcounterTxHook implements PreCommitTxHook { @Override public void onDelete(JObjectKey key, JData cur) { + if (cur instanceof JMapEntry me) { + var oldRef = me.ref(); + var 
referencedOld = getRef(oldRef); + curTx.put(referencedOld.withRefsFrom(referencedOld.refsFrom().minus(key))); + Log.tracev("Removed ref from {0} to {1}", key, oldRef); + return; + } + if (!(cur instanceof JDataRefcounted refCur)) { return; } for (var removedRef : refCur.collectRefsTo()) { - var referenced = getRef(refCur, removedRef); + var referenced = getRef(removedRef); curTx.put(referenced.withRefsFrom(referenced.refsFrom().minus(key))); Log.tracev("Removed ref from {0} to {1}", key, removedRef); } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java index 6db896a2..b2d9ab6d 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/RemoteObjectMeta.java @@ -45,7 +45,7 @@ public record RemoteObjectMeta(PCollection refsFrom, boolean frozen, } public static JObjectKey ofDataKey(JObjectKey key) { - return JObjectKey.of(key.name() + "_data"); + return JObjectKey.of("data_" + key.name()); } public JObjectKey dataKey() { diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java new file mode 100644 index 00000000..997c21bd --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapEntry.java @@ -0,0 +1,12 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; + +public record JMapEntry>(JObjectKey holder, K selfKey, + JObjectKey ref) implements JData { + @Override + public JObjectKey key() { + return JMapHelper.makeKey(holder, selfKey); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java new file mode 100644 index 00000000..a777cb91 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHelper.java @@ -0,0 +1,49 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.transaction.Transaction; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; + +import java.util.Optional; + +@ApplicationScoped +public class JMapHelper { + @Inject + Transaction curTx; + + static > JObjectKey makePrefix(JObjectKey holder) { + return JObjectKey.of(holder.name() + "/"); + } + + static > JObjectKey makeKey(JObjectKey holder, K key) { + return JObjectKey.of(makePrefix(holder).name() + key.toString()); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, IteratorStart start, K key) { + return new JMapIterator<>(curTx.getIterator(start, makeKey(holder.key(), key)), holder); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, K key) { + return getIterator(holder, IteratorStart.GE, key); + } + + public > CloseableKvIterator> getIterator(JMapHolder holder, IteratorStart start) { + return new JMapIterator<>(curTx.getIterator(start, makePrefix(holder.key())), holder); + } + + public > void put(JMapHolder holder, K key, JObjectKey ref) { + curTx.put(new JMapEntry<>(holder.key(), key, ref)); + } + + public > Optional> get(JMapHolder holder, K key) { + // TODO: + return curTx.get(JMapEntry.class, 
makeKey(holder.key(), key)).map(e -> (JMapEntry) e); + } + + public > void delete(JMapHolder holder, K key) { + curTx.delete(makeKey(holder.key(), key)); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java new file mode 100644 index 00000000..b8dcbed3 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapHolder.java @@ -0,0 +1,6 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.JData; + +public interface JMapHolder> extends JData { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java new file mode 100644 index 00000000..d997f3b8 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java @@ -0,0 +1,75 @@ +package com.usatiuk.dhfs.objects.jmap; + +import com.usatiuk.dhfs.objects.CloseableKvIterator; +import com.usatiuk.dhfs.objects.JData; +import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.tuple.Pair; + +public class JMapIterator> implements CloseableKvIterator> { + private final CloseableKvIterator _backing; + private final JObjectKey _prefix; + private boolean _hasNext = true; + + public JMapIterator(CloseableKvIterator backing, JMapHolder holder) { + _backing = backing; + _prefix = JMapHelper.makePrefix(holder.key()); + advance(); + } + + void advance() { + assert _hasNext; + if (!_backing.hasNext()) { + _hasNext = false; + return; + } + if (!_backing.peekNextKey().name().startsWith(_prefix.name())) { + _backing.skip(); + if (!_backing.peekNextKey().name().startsWith(_prefix.name())) { + _hasNext = false; + } + } + } + + public K keyToKey(JObjectKey key) { + var keyPart = key.name().substring(_prefix.name().length()); + return (K) JMapLongKey.of(Long.parseLong(keyPart)); + } + + @Override + public K peekNextKey() { + if (!_hasNext) { + throw new IllegalStateException("No next element"); + } + + return keyToKey(_backing.peekNextKey()); + } + + @Override + public void skip() { + if (!_hasNext) { + throw new IllegalStateException("No next element"); + } + advance(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _hasNext; + } + + @Override + public Pair> next() { + if (!_hasNext) { + throw new IllegalStateException("No next element"); + } + var next = _backing.next(); + assert next.getKey().name().startsWith(_prefix.name()); + advance(); + return Pair.of(keyToKey(next.getKey()), (JMapEntry) next.getValue()); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java new file mode 100644 index 00000000..46e61a2c --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapKey.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.jmap; + +public interface JMapKey { +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java new file mode 100644 index 00000000..83461084 --- /dev/null +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapLongKey.java @@ -0,0 +1,24 @@ +package com.usatiuk.dhfs.objects.jmap; + +import javax.annotation.Nonnull; +import java.io.Serializable; + 
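+// Ordering note: JMapHelper flattens an entry key to holder + "/" + toString(),
+// so range iteration over a holder's entries follows the lexicographic order of
+// that string. The zero-padded rendering below keeps lexicographic and numeric
+// order in agreement, assuming non-negative keys of at most 16 digits (ample
+// for byte offsets); a negative or wider value would gain a sign or extra
+// digits and no longer sort correctly.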
+public record JMapLongKey(long key) implements JMapKey, Comparable, Serializable { + public static JMapLongKey of(long key) { + return new JMapLongKey(key); + } + + @Override + public String toString() { + return String.format("%016d", key); + } + + public static JMapLongKey max() { + return new JMapLongKey(Long.MAX_VALUE); + } + + @Override + public int compareTo(@Nonnull JMapLongKey o) { + return Long.compare(key, o.key); + } +} diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java index 4f4343f1..5e820e18 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java @@ -24,4 +24,12 @@ public record PersistentRemoteHostsData(PeerId selfUuid, public PersistentRemoteHostsData withInitialSyncDone(PSet initialSyncDone) { return new PersistentRemoteHostsData(selfUuid, selfCertificate, selfKeyPair, initialSyncDone); } + + @Override + public String toString() { + return "PersistentRemoteHostsData{" + + "selfUuid=" + selfUuid + + ", initialSyncDone=" + initialSyncDone + + '}'; + } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java index 0ec03042..7b2d8a59 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java +++ b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java @@ -16,4 +16,12 @@ public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JD public X509Certificate parsedCert() { return CertificateTools.certFromBytes(cert.toByteArray()); } + + @Override + public String toString() { + return "PeerInfo{" + + "key=" + key + + ", id=" + id + + '}'; + } } diff --git a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java index aa2a0da0..6d02c516 100644 --- a/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ b/dhfs-parent/server/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java @@ -1,7 +1,6 @@ package com.usatiuk.dhfs.files; import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.files.objects.ChunkData; import com.usatiuk.dhfs.files.objects.File; import com.usatiuk.dhfs.files.service.DhfsFileService; import com.usatiuk.dhfs.objects.RemoteTransaction; @@ -112,6 +111,7 @@ public class DhfsFileServiceSimpleTestImpl { fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray()); fileService.write(uuid, 4, new byte[]{10, 11, 12}); Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); fileService.write(uuid, 10, new byte[]{13, 14}); @@ -154,19 +154,23 @@ public class DhfsFileServiceSimpleTestImpl { Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, 
fileService.read(uuid, 0, 20).get().toByteArray()); } - @Test + @RepeatedTest(100) void truncateTest2() { var ret = fileService.create("/truncateTest2", 777); - Assertions.assertTrue(ret.isPresent()); + try { + Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); + var uuid = ret.get(); - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); + fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.truncate(uuid, 20); - fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + fileService.truncate(uuid, 20); + fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); + Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); + } finally { + fileService.unlink("/truncateTest2"); + } } @Test @@ -217,8 +221,8 @@ public class DhfsFileServiceSimpleTestImpl { jObjectTxManager.run(() -> { var oldfile = remoteTx.getData(File.class, ret2.get()).orElseThrow(IllegalStateException::new); - var chunk = oldfile.chunks().get(0L); - var chunkObj = remoteTx.getData(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); +// var chunk = oldfile.chunks().get(0L); +// var chunkObj = remoteTx.getData(ChunkData.class, chunk).orElseThrow(IllegalStateException::new); }); Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); From 5cbf5fcda201f6a0b29eca6a7366e0c25c738a39 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Tue, 25 Feb 2025 20:20:51 +0100 Subject: [PATCH 101/105] test fixie --- .../com/usatiuk/dhfs/objects/ObjectsTest.java | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java index 13a30c5d..dd12880d 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java @@ -261,6 +261,92 @@ public class ObjectsTest { return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); }); + if (!thread1Failed.get() && !thread2Failed.get()) { + Assertions.assertTrue(got.name().equals("John") || got.name().equals("John2")); + return; + } + + Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get()); + + if (!thread1Failed.get()) { + if (!thread2Failed.get()) { + Assertions.assertEquals("John2", got.name()); + } else { + Assertions.assertEquals("John", got.name()); + } + } else { + Assertions.assertFalse(thread2Failed.get()); + Assertions.assertEquals("John2", got.name()); + } + } + + @ParameterizedTest + @EnumSource(LockingStrategy.class) + void editConflict2(LockingStrategy strategy) { + String key = "EditConflict2" + strategy.name(); + txm.run(() -> { + var newParent = new Parent(JObjectKey.of(key), "John3"); + curTx.put(newParent); + }); + + AtomicBoolean thread1Failed = new AtomicBoolean(true); + AtomicBoolean thread2Failed = new AtomicBoolean(true); + + var barrier = new 
CyclicBarrier(2); + var latchEnd = new CountDownLatch(2); + + Just.run(() -> { + try { + Log.warn("Thread 1"); + txm.runTries(() -> { + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John")); + Log.warn("Thread 1 commit"); + }, 0); + Log.warn("Thread 1 commit done"); + thread1Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + Just.run(() -> { + try { + Log.warn("Thread 2"); + txm.runTries(() -> { + // Ensure they will conflict + try { + barrier.await(); + } catch (Throwable e) { + throw new RuntimeException(e); + } + var parent = curTx.get(Parent.class, new JObjectKey(key), strategy).orElse(null); + curTx.put(parent.withName("John2")); + Log.warn("Thread 2 commit"); + }, 0); + Log.warn("Thread 2 commit done"); + thread2Failed.set(false); + return null; + } finally { + latchEnd.countDown(); + } + }); + + try { + latchEnd.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + var got = txm.run(() -> { + return curTx.get(Parent.class, new JObjectKey(key)).orElse(null); + }); + Assertions.assertFalse(!thread1Failed.get() && !thread2Failed.get()); if (!thread1Failed.get()) { @@ -762,6 +848,8 @@ public class ObjectsTest { () -> createCreateObject(), () -> editConflict(LockingStrategy.WRITE), () -> editConflict(LockingStrategy.OPTIMISTIC), + () -> editConflict2(LockingStrategy.WRITE), + () -> editConflict2(LockingStrategy.OPTIMISTIC), () -> snapshotTest1(), () -> snapshotTest2(), () -> snapshotTest3(), From 3e84ff1ed6d69fb3b2b4a07952b8b34510d269fa Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 26 Feb 2025 11:58:07 +0100 Subject: [PATCH 102/105] broken extra checks --- .../InconsistentSelfRefreshingKvIterator.java | 3 + .../usatiuk/dhfs/objects/SnapshotManager.java | 87 ++++++++++++++----- .../src/main/resources/application.properties | 1 + .../dhfs/objects/ObjectsTestExtraChecks.java | 9 ++ ...{ObjectsTest.java => ObjectsTestImpl.java} | 23 ++++- .../objects/ObjectsTestNoExtraChecks.java | 9 ++ .../src/test/resources/application.properties | 1 + .../src/test/resources/application.properties | 3 +- 8 files changed, 109 insertions(+), 27 deletions(-) create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java rename dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/{ObjectsTest.java => ObjectsTestImpl.java} (97%) create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java index c296f255..c503ad1a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java @@ -53,6 +53,9 @@ public class InconsistentSelfRefreshingKvIterator, V> im } } else if (_lastReturnedKey != null) { _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey)); + if (_backing.hasNext() && !(_backing.peekNextKey().compareTo(_lastReturnedKey) > 0)) { + throw new StaleIteratorException(); + } } else { _backing = _iteratorSupplier.apply(_initialStart); } diff --git 
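The guard added above tightens the refresh contract of InconsistentSelfRefreshingKvIterator: after re-opening the backing iterator at (GT, _lastReturnedKey), the first visible key must be strictly greater than the key already handed out, otherwise the underlying view has shifted and resuming would repeat or reorder entries. Restated as a standalone invariant (illustrative only; field names as in the hunk above):

    // After a refresh, either the re-opened iterator is exhausted, or it
    // resumes strictly past the last key this iterator returned. E.g. if
    // next() last returned key 3 and a concurrent compaction makes the
    // re-seek yield 3 (or 2) again, fail fast instead of emitting duplicates.
    var refreshed = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey));
    if (refreshed.hasNext() && refreshed.peekNextKey().compareTo(_lastReturnedKey) <= 0)
        throw new StaleIteratorException();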
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java index 9ebf8949..5ca22c02 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java @@ -8,6 +8,7 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; +import org.eclipse.microprofile.config.inject.ConfigProperty; import javax.annotation.Nonnull; import java.lang.ref.Cleaner; @@ -16,6 +17,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; +import java.util.function.Function; @ApplicationScoped public class SnapshotManager { @@ -43,6 +45,9 @@ public class SnapshotManager { } } + @ConfigProperty(name = "dhfs.objects.persistence.snapshot-extra-checks") + boolean extraChecks; + private long _lastSnapshotId = 0; private long _lastAliveSnapshotId = -1; private final AtomicLong _snapshotVersion = new AtomicLong(0); @@ -238,7 +243,7 @@ public class SnapshotManager { public class SnapshotKvIterator implements CloseableKvIterator> { private final CloseableKvIterator _backing; - private Pair> _next; + private Pair> _next = null; public SnapshotKvIterator(IteratorStart start, JObjectKey startKey) { _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, 0L)); @@ -246,13 +251,18 @@ public class SnapshotManager { if (_next == null) { return; } - if (start == IteratorStart.LE) { - if (_next.getKey().compareTo(startKey) > 0) { - _next = null; + switch (start) { + case LT -> { + assert _next.getKey().compareTo(startKey) < 0; } - } else if (start == IteratorStart.LT) { - if (_next.getKey().compareTo(startKey) >= 0) { - _next = null; + case LE -> { + assert _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next.getKey().compareTo(startKey) > 0; + } + case GE -> { + assert _next.getKey().compareTo(startKey) >= 0; } } } @@ -338,29 +348,54 @@ public class SnapshotManager { @Override public JObjectKey peekNextKey() { - return _backing.peekNextKey(); + try { + return _backing.peekNextKey(); + } catch (StaleIteratorException e) { + assert false; + throw e; + } } @Override public void skip() { - _backing.skip(); + try { + _backing.skip(); + } catch (StaleIteratorException e) { + assert false; + throw e; + } } @Override public void close() { - _backing.close(); + try { + _backing.close(); + } catch (StaleIteratorException e) { + assert false; + throw e; + } } @Override public boolean hasNext() { - return _backing.hasNext(); + try { + return _backing.hasNext(); + } catch (StaleIteratorException e) { + assert false; + throw e; + } } @Override public Pair next() { - var ret = _backing.next(); - assert ret.getValue().version() <= _id; - return ret; + try { + var ret = _backing.next(); + assert ret.getValue().version() <= _id; + return ret; + } catch (StaleIteratorException e) { + assert false; + throw e; + } } } @@ -372,15 +407,21 @@ public class SnapshotManager { Log.tracev("Getting snapshot {0} iterator for {1} {2}", _id, start, key); _lock.readLock().lock(); try { - return new CheckingSnapshotKvIterator(new SelfRefreshingKvIterator<>( - p -> - new TombstoneMergingKvIterator<>("snapshot", p.getKey(), p.getValue(), - 
SnapshotKvIterator::new, - (tS, tK) -> new MappingKvIterator<>( - writebackStore.getIterator(tS, tK), - d -> d.version() <= _id ? new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>()) - ) - , _snapshotVersion::get, _lock.readLock(), start, key)); + Function, CloseableKvIterator> iteratorFactory = + p -> new TombstoneMergingKvIterator<>("snapshot", p.getKey(), p.getValue(), + SnapshotKvIterator::new, + (tS, tK) -> new MappingKvIterator<>( + writebackStore.getIterator(tS, tK), + d -> d.version() <= _id ? new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>()) + ); + + var backing = extraChecks ? new SelfRefreshingKvIterator<>( + iteratorFactory, _snapshotVersion::get, _lock.readLock(), start, key + ) : new InconsistentSelfRefreshingKvIterator<>( + iteratorFactory, _snapshotVersion::get, _lock.readLock(), start, key + ); + + return new CheckingSnapshotKvIterator(backing); } finally { _lock.readLock().unlock(); } diff --git a/dhfs-parent/objects/src/main/resources/application.properties b/dhfs-parent/objects/src/main/resources/application.properties index 93211847..71d81280 100644 --- a/dhfs-parent/objects/src/main/resources/application.properties +++ b/dhfs-parent/objects/src/main/resources/application.properties @@ -5,3 +5,4 @@ dhfs.objects.lru.print-stats=true dhfs.objects.lock_timeout_secs=15 dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs quarkus.package.jar.decompiler.enabled=true +dhfs.objects.persistence.snapshot-extra-checks=false \ No newline at end of file diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java new file mode 100644 index 00000000..9c933417 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.ObjectsTestProfileExtraChecks.class) +public class ObjectsTestExtraChecks extends ObjectsTestImpl{ +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java similarity index 97% rename from dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java rename to dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java index dd12880d..a3c346ff 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java @@ -5,7 +5,7 @@ import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; -import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.QuarkusTestProfile; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; @@ -13,16 +13,33 @@ import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import org.pcollections.TreePMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; -@QuarkusTest -public class ObjectsTest { +class Profiles { + public static class ObjectsTestProfileExtraChecks implements QuarkusTestProfile { + @Override + final public Map getConfigOverrides() { + return TreePMap.empty().plus("dhfs.objects.persistence.snapshot-extra-checks", "true"); + } + } + + public static class ObjectsTestProfileNoExtraChecks implements QuarkusTestProfile { + @Override + final public Map getConfigOverrides() { + return TreePMap.empty().plus("dhfs.objects.persistence.snapshot-extra-checks", "false"); + } + } +} + +public abstract class ObjectsTestImpl { @Inject TransactionManager txm; diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java new file mode 100644 index 00000000..661a9e4d --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestNoExtraChecks.java @@ -0,0 +1,9 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; + +@QuarkusTest +@TestProfile(Profiles.ObjectsTestProfileNoExtraChecks.class) +public class ObjectsTestNoExtraChecks extends ObjectsTestImpl { +} diff --git a/dhfs-parent/objects/src/test/resources/application.properties b/dhfs-parent/objects/src/test/resources/application.properties index 3fab7b8f..f02e185d 100644 --- a/dhfs-parent/objects/src/test/resources/application.properties +++ b/dhfs-parent/objects/src/test/resources/application.properties @@ -3,3 +3,4 @@ quarkus.log.category."com.usatiuk".level=TRACE quarkus.log.category."com.usatiuk".min-level=TRACE quarkus.http.test-port=0 quarkus.http.test-ssl-port=0 +dhfs.objects.persistence.snapshot-extra-checks=true \ No newline at end of file diff --git a/dhfs-parent/server/src/test/resources/application.properties b/dhfs-parent/server/src/test/resources/application.properties index 64f51835..ecef50b6 100644 --- a/dhfs-parent/server/src/test/resources/application.properties +++ b/dhfs-parent/server/src/test/resources/application.properties @@ -8,4 +8,5 @@ quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib quarkus.http.test-port=0 quarkus.http.test-ssl-port=0 -dhfs.local-discovery=false \ No newline at end of file +dhfs.local-discovery=false +dhfs.objects.persistence.snapshot-extra-checks=true \ No newline at end of file From 52c31bc86422156a23a958cd4df65fc53f80f3fe Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Wed, 26 Feb 2025 18:34:13 +0100 Subject: [PATCH 103/105] some moving around --- .../usatiuk/dhfs/objects/JObjectManager.java | 1 + .../WritebackObjectPersistentStore.java | 6 +- .../dhfs/objects/snapshot/SnapshotEntry.java | 5 + .../snapshot/SnapshotEntryDeleted.java | 4 + .../objects/snapshot/SnapshotEntryObject.java | 6 + .../dhfs/objects/snapshot/SnapshotKey.java | 15 +++ .../objects/snapshot/SnapshotKvIterator.java | 111 +++++++++++++++ .../{ => snapshot}/SnapshotManager.java | 126 +----------------- .../ReadTrackingObjectSourceFactory.java | 1 + .../transaction/TransactionFactoryImpl.java | 1 + .../transaction/TransactionPrivate.java | 2 +- 11 files changed, 153 insertions(+), 125 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java 
create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java rename dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/{ => snapshot}/SnapshotManager.java (73%) diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index fdd3f421..6aedd59a 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -1,5 +1,6 @@ package com.usatiuk.dhfs.objects; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; import com.usatiuk.dhfs.objects.transaction.*; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import io.quarkus.logging.Log; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 173689b0..56c84aed 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -384,7 +384,7 @@ public class WritebackObjectPersistentStore { } @Nonnull - Optional readObject(JObjectKey name) { + public Optional readObject(JObjectKey name) { var pending = getPendingWrite(name).orElse(null); return switch (pending) { case PendingWrite write -> Optional.of(write.data()); @@ -404,7 +404,7 @@ public class WritebackObjectPersistentStore { } @Nonnull - VerboseReadResult readObjectVerbose(JObjectKey key) { + public VerboseReadResult readObjectVerbose(JObjectKey key) { var pending = getPendingWrite(key).orElse(null); if (pending != null) { return new VerboseReadResultPending(pending); @@ -412,7 +412,7 @@ public class WritebackObjectPersistentStore { return new VerboseReadResultPersisted(cachedStore.readObject(key)); } - Consumer commitTx(Collection> writes, long id) { + public Consumer commitTx(Collection> writes, long id) { var bundle = createBundle(); try { for (var action : writes) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java new file mode 100644 index 00000000..5bb49752 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java @@ -0,0 +1,5 @@ +package com.usatiuk.dhfs.objects.snapshot; + +interface SnapshotEntry { + long whenToRemove(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java new file mode 100644 index 00000000..bd838c17 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java @@ -0,0 +1,4 @@ +package com.usatiuk.dhfs.objects.snapshot; + +record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { +} diff --git 
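SnapshotEntry deliberately keeps two concrete variants: SnapshotEntryObject for a value that was alive at some version, and SnapshotEntryDeleted as an explicit tombstone. The tombstone cannot simply be skipped, because in the merged snapshot view it has to shadow an older live value coming from a lower-priority layer. A minimal model of that rule, with hypothetical stand-in types (not the project's real classes), just to pin down the semantics:

    // Hypothetical mini-model of tombstone merging (Java 21 patterns assumed).
    sealed interface Entry permits Live, Tombstone {}
    record Live(String value) implements Entry {}
    record Tombstone() implements Entry {}

    // The higher-priority (snapshot) layer wins; its tombstone hides the
    // lower layer's value instead of falling through to it.
    static String resolve(Entry snapshotLayer, String lowerLayerValue) {
        return switch (snapshotLayer) {
            case Live l -> l.value();     // live at snapshot time
            case Tombstone t -> null;     // deleted at snapshot time: absent
            case null -> lowerLayerValue; // key unknown to the snapshot layer
        };
    }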
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java new file mode 100644 index 00000000..f4d7e40d --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java @@ -0,0 +1,6 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JDataVersionedWrapper; + +record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java new file mode 100644 index 00000000..9fd6e80f --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java @@ -0,0 +1,15 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JObjectKey; + +import javax.annotation.Nonnull; +import java.util.Comparator; + +record SnapshotKey(JObjectKey key, long version) implements Comparable { + @Override + public int compareTo(@Nonnull SnapshotKey o) { + return Comparator.comparing(SnapshotKey::key) + .thenComparing(SnapshotKey::version) + .compare(this, o); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java new file mode 100644 index 00000000..2b52fae4 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java @@ -0,0 +1,111 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.*; +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NavigableMap; +import java.util.NoSuchElementException; + +public class SnapshotKvIterator implements CloseableKvIterator> { + private final NavigableMap _objects; + private final long _version; + private final CloseableKvIterator _backing; + private Pair> _next = null; + + public SnapshotKvIterator(NavigableMap objects, long version, IteratorStart start, JObjectKey startKey) { + _objects = objects; + _version = version; + _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, 0L)); + fillNext(); + if (_next == null) { + return; + } + switch (start) { + case LT -> { + assert _next.getKey().compareTo(startKey) < 0; + } + case LE -> { + assert _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next.getKey().compareTo(startKey) > 0; + } + case GE -> { + assert _next.getKey().compareTo(startKey) >= 0; + } + } + } + + private void fillNext() { + while (_backing.hasNext() && _next == null) { + var next = _backing.next(); + var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; + while (nextNextKey != null && nextNextKey.key().equals(next.getKey().key()) && nextNextKey.version() <= _version) { + next = _backing.next(); + nextNextKey = _backing.hasNext() ? 
_backing.peekNextKey() : null; + } + // next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx + if (next.getKey().version() <= _version && next.getValue().whenToRemove() > _version) { + _next = switch (next.getValue()) { + case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); + case SnapshotEntryDeleted(long whenToRemove) -> + Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); + default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); + }; + } + if (_next != null) { + if (_next.getValue() instanceof TombstoneMergingKvIterator.Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + } + } + } + + @Override + public JObjectKey peekNextKey() { + if (_next == null) + throw new NoSuchElementException(); + return _next.getKey(); + } + + @Override + public void skip() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + fillNext(); + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public Pair> next() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var ret = _next; + if (ret.getValue() instanceof TombstoneMergingKvIterator.Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + + _next = null; + fillNext(); + Log.tracev("Read: {0}, next: {1}", ret, _next); + return ret; + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java similarity index 73% rename from dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java rename to dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java index 5ca22c02..bb7be190 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java @@ -1,5 +1,6 @@ -package com.usatiuk.dhfs.objects; +package com.usatiuk.dhfs.objects.snapshot; +import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.TxRecord; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; @@ -26,25 +27,6 @@ public class SnapshotManager { private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); - private interface SnapshotEntry { - long whenToRemove(); - } - - private record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { - } - - private record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { - } - - private record SnapshotKey(JObjectKey key, long version) implements Comparable { - @Override - public int compareTo(@Nonnull SnapshotKey o) { - return Comparator.comparing(SnapshotKey::key) - .thenComparing(SnapshotKey::version) - .compare(this, o); - } - } - @ConfigProperty(name = "dhfs.objects.persistence.snapshot-extra-checks") boolean extraChecks; @@ -62,7 +44,7 @@ public class SnapshotManager { assert _snapshotIds.isEmpty() || _snapshotIds.peek() == _lastAliveSnapshotId; } - Consumer commitTx(Collection> writes, long id) { + public Consumer commitTx(Collection> writes, long id) { _lock.writeLock().lock(); try { assert id > 
_lastSnapshotId; @@ -241,104 +223,6 @@ public class SnapshotManager { }); } - public class SnapshotKvIterator implements CloseableKvIterator> { - private final CloseableKvIterator _backing; - private Pair> _next = null; - - public SnapshotKvIterator(IteratorStart start, JObjectKey startKey) { - _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, 0L)); - fillNext(); - if (_next == null) { - return; - } - switch (start) { - case LT -> { - assert _next.getKey().compareTo(startKey) < 0; - } - case LE -> { - assert _next.getKey().compareTo(startKey) <= 0; - } - case GT -> { - assert _next.getKey().compareTo(startKey) > 0; - } - case GE -> { - assert _next.getKey().compareTo(startKey) >= 0; - } - } - } - - private void fillNext() { - while (_backing.hasNext() && _next == null) { - var next = _backing.next(); - var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; - while (nextNextKey != null && nextNextKey.key.equals(next.getKey().key()) && nextNextKey.version() <= _id) { - next = _backing.next(); - nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; - } - // next.getValue().whenToRemove() >=_id, read tx might have same snapshot id as some write tx - if (next.getKey().version() <= _id && next.getValue().whenToRemove() > _id) { - _next = switch (next.getValue()) { - case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); - case SnapshotEntryDeleted(long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); - default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); - }; - } - if (_next != null) { - if (_next.getValue() instanceof TombstoneMergingKvIterator.Data( - JDataVersionedWrapper value - )) { - assert value.version() <= _id; - } - } - } - } - - @Override - public JObjectKey peekNextKey() { - if (_next == null) - throw new NoSuchElementException(); - return _next.getKey(); - } - - @Override - public void skip() { - if (_next == null) - throw new NoSuchElementException(); - _next = null; - fillNext(); - } - - @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { - return _next != null; - } - - @Override - public Pair> next() { - if (_next == null) - throw new NoSuchElementException("No more elements"); - var ret = _next; - if (ret.getValue() instanceof TombstoneMergingKvIterator.Data( - JDataVersionedWrapper value - )) { - assert value.version() <= _id; - } - - _next = null; - fillNext(); - Log.tracev("Read: {0}, next: {1}", ret, _next); - return ret; - } - - } - public class CheckingSnapshotKvIterator implements CloseableKvIterator { private final CloseableKvIterator _backing; @@ -409,7 +293,7 @@ public class SnapshotManager { try { Function, CloseableKvIterator> iteratorFactory = p -> new TombstoneMergingKvIterator<>("snapshot", p.getKey(), p.getValue(), - SnapshotKvIterator::new, + (tS, tK) -> new SnapshotKvIterator(_objects, _id, tS, tK), (tS, tK) -> new MappingKvIterator<>( writebackStore.getIterator(tS, tK), d -> d.version() <= _id ? 
new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>()) @@ -459,7 +343,7 @@ public class SnapshotManager { } @Nonnull - Optional readObjectDirect(JObjectKey name) { + public Optional readObjectDirect(JObjectKey name) { return writebackStore.readObject(name); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java index 17881c03..eae1d216 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 58939457..29c03c12 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -2,6 +2,7 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.*; import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; import io.quarkus.logging.Log; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java index 7a3c0705..766a3a63 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionPrivate.java @@ -1,7 +1,7 @@ package com.usatiuk.dhfs.objects.transaction; import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.SnapshotManager; +import com.usatiuk.dhfs.objects.snapshot.SnapshotManager; import com.usatiuk.dhfs.utils.AutoCloseableNoThrow; import java.util.Collection; From e7bea01faf9f981fe42db76714ececb7e5241f14 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Thu, 27 Feb 2025 10:16:34 +0100 Subject: [PATCH 104/105] bunch of iterators with prev --- .../dhfs/objects/CloseableKvIterator.java | 20 +++ .../dhfs/objects/MappingKvIterator.java | 21 ++++ .../dhfs/objects/MergingKvIterator.java | 62 ++++++--- .../dhfs/objects/NavigableMapKvIterator.java | 47 +++++-- .../dhfs/objects/PredicateKvIterator.java | 77 +++++++++--- .../dhfs/objects/ReversedKvIterator.java | 61 +++++++++ .../dhfs/objects/ReversibleKvIterator.java | 79 ++++++++++++ .../CachingObjectPersistentStore.java | 23 ++++ .../LmdbObjectPersistentStore.java | 74 +++++++---- .../SerializingObjectPersistentStore.java | 46 +------ .../dhfs/objects/snapshot/SnapshotEntry.java | 2 +- .../snapshot/SnapshotEntryDeleted.java | 2 +- .../objects/snapshot/SnapshotEntryObject.java | 2 +- 
.../dhfs/objects/snapshot/SnapshotKey.java | 2 +- .../java/com/usatiuk/dhfs/objects/Just.java | 16 +++ .../dhfs/objects/MergingKvIteratorTest.java | 27 ++++ .../objects/NavigableMapKvIteratorTest.java | 71 +++++++++++ .../dhfs/objects/PredicateKvIteratorTest.java | 119 ++++++++++++++++++ .../usatiuk/dhfs/objects/TempDataProfile.java | 30 +++++ .../usatiuk/dhfs/objects/TestDataCleaner.java | 40 ++++++ .../persistence/LmdbKvIteratorTest.java | 106 ++++++++++++++++ .../snapshot/SnapshotKvIteratorTest.java | 11 ++ 22 files changed, 818 insertions(+), 120 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java index bcc3474c..13b439af 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java @@ -9,4 +9,24 @@ public interface CloseableKvIterator, V> extends Iterato K peekNextKey(); void skip(); + + default K peekPrevKey() { + throw new UnsupportedOperationException(); + } + + default Pair prev() { + throw new UnsupportedOperationException(); + } + + default boolean hasPrev() { + throw new UnsupportedOperationException(); + } + + default void skipPrev() { + throw new UnsupportedOperationException(); + } + + default CloseableKvIterator reversed() { + return new ReversedKvIterator<>(this); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java index 6a374c34..eae8f788 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MappingKvIterator.java @@ -33,6 +33,27 @@ public class MappingKvIterator, V, V_T> implements Close return _backing.hasNext(); } + @Override + public K peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var got = _backing.prev(); + return Pair.of(got.getKey(), _transformer.apply(got.getValue())); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + @Override public Pair next() { var got = _backing.next(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java index 04a293bf..a4f193c6 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -6,12 +6,13 @@ import 
org.apache.commons.lang3.tuple.Pair; import java.util.*; -public class MergingKvIterator, V> implements CloseableKvIterator { +public class MergingKvIterator, V> extends ReversibleKvIterator { private final Map, Integer> _iterators; private final NavigableMap> _sortedIterators = new TreeMap<>(); private final String _name; public MergingKvIterator(String name, IteratorStart startType, K startKey, List> iterators) { + _goingForward = true; _name = name; IteratorStart initialStartType = startType; @@ -92,9 +93,10 @@ public class MergingKvIterator, V> implements CloseableK return; } - var oursPrio = _iterators.get(iterator); + // Expects that a reversed iterator returns itself when reversed again + var oursPrio = _iterators.get(_goingForward ? iterator : iterator.reversed()); var them = _sortedIterators.get(key); - var theirsPrio = _iterators.get(_goingForward ? them : them.reversed()); if (oursPrio < theirsPrio) { _sortedIterators.put(key, iterator); advanceIterator(them); @@ -106,15 +108,36 @@ public class MergingKvIterator, V> implements CloseableK } @Override - public K peekNextKey() { - if (_sortedIterators.isEmpty()) - throw new NoSuchElementException(); - return _sortedIterators.firstKey(); + protected void reverse() {
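+        // Direction flip: note the entry we were positioned on, re-seed every
+        // backing iterator in the opposite direction, then re-align just past
+        // that key below, as exhausted iterators may come back into play.
+        var cur = _goingForward ? 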
_sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry(); if (cur == null) { throw new NoSuchElementException(); } @@ -147,6 +163,14 @@ public class MergingKvIterator, V> implements CloseableK return curVal; } + + @Override + public void close() { + for (CloseableKvIterator iterator : _iterators.keySet()) { + iterator.close(); + } + } + @Override public String toString() { return "MergingKvIterator{" + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java index 6b5a6ccf..c1f07007 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/NavigableMapKvIterator.java @@ -5,23 +5,26 @@ import org.apache.commons.lang3.tuple.Pair; import java.util.*; -public class NavigableMapKvIterator, V> implements CloseableKvIterator { - private final Iterator> _iterator; +public class NavigableMapKvIterator, V> extends ReversibleKvIterator { + private final NavigableMap _map; + private Iterator> _iterator; private Map.Entry _next; public NavigableMapKvIterator(NavigableMap map, IteratorStart start, K key) { + _map = map; SortedMap _view; + _goingForward = true; switch (start) { case GE -> _view = map.tailMap(key, true); case GT -> _view = map.tailMap(key, false); case LE -> { var floorKey = map.floorKey(key); - if (floorKey == null) _view = Collections.emptyNavigableMap(); + if (floorKey == null) _view = _map; else _view = map.tailMap(floorKey, true); } case LT -> { var lowerKey = map.lowerKey(key); - if (lowerKey == null) _view = Collections.emptyNavigableMap(); + if (lowerKey == null) _view = _map; else _view = map.tailMap(lowerKey, true); } default -> throw new IllegalArgumentException("Unknown start type"); @@ -30,6 +33,25 @@ public class NavigableMapKvIterator, V> implements Close fillNext(); } + @Override + protected void reverse() { + var oldNext = _next; + _next = null; + if (_goingForward) { + _iterator + = oldNext == null + ? _map.descendingMap().entrySet().iterator() + : _map.headMap(oldNext.getKey(), false).descendingMap().entrySet().iterator(); + } else { + _iterator + = oldNext == null + ? 
_map.entrySet().iterator() + : _map.tailMap(oldNext.getKey(), false).entrySet().iterator(); + } + _goingForward = !_goingForward; + fillNext(); + } + private void fillNext() { while (_iterator.hasNext() && _next == null) { _next = _iterator.next(); @@ -37,7 +59,7 @@ public class NavigableMapKvIterator, V> implements Close } @Override - public K peekNextKey() { + protected K peekImpl() { if (_next == null) { throw new NoSuchElementException(); } @@ -45,7 +67,7 @@ public class NavigableMapKvIterator, V> implements Close } @Override - public void skip() { + protected void skipImpl() { if (_next == null) { throw new NoSuchElementException(); } @@ -54,16 +76,12 @@ public class NavigableMapKvIterator, V> implements Close } @Override - public void close() { - } - - @Override - public boolean hasNext() { + protected boolean hasImpl() { return _next != null; } @Override - public Pair next() { + protected Pair nextImpl() { if (_next == null) { throw new NoSuchElementException("No more elements"); } @@ -73,10 +91,13 @@ public class NavigableMapKvIterator, V> implements Close return Pair.of(ret); } + @Override + public void close() { + } + @Override public String toString() { return "NavigableMapKvIterator{" + - "_iterator=" + _iterator + ", _next=" + _next + '}'; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java index 24a72b89..22b6dbe8 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -6,32 +6,58 @@ import org.apache.commons.lang3.tuple.Pair; import java.util.NoSuchElementException; import java.util.function.Function; -public class PredicateKvIterator, V, V_T> implements CloseableKvIterator { +public class PredicateKvIterator, V, V_T> extends ReversibleKvIterator { private final CloseableKvIterator _backing; private final Function _transformer; private Pair _next; public PredicateKvIterator(CloseableKvIterator backing, IteratorStart start, K startKey, Function transformer) { + _goingForward = true; _backing = backing; _transformer = transformer; fillNext(); - if (_next == null) { - return; - } + + boolean shouldGoBack = false; if (start == IteratorStart.LE) { - if (_next.getKey().compareTo(startKey) > 0) { - _next = null; + if (_next == null || _next.getKey().compareTo(startKey) > 0) { + shouldGoBack = true; } } else if (start == IteratorStart.LT) { - if (_next.getKey().compareTo(startKey) >= 0) { - _next = null; + if (_next == null || _next.getKey().compareTo(startKey) >= 0) { + shouldGoBack = true; + } + } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _next = null; + _backing.skipPrev(); + fillNext(); + _goingForward = true; + _backing.skip(); + fillNext(); + } + + + switch (start) { + case LT -> { +// assert _next == null || _next.getKey().compareTo(startKey) < 0; + } + case LE -> { +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next == null || _next.getKey().compareTo(startKey) > 0; + } + case GE -> { + assert _next == null || _next.getKey().compareTo(startKey) >= 0; } } } private void fillNext() { - while (_backing.hasNext() && _next == null) { - var next = _backing.next(); + while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) { + var next = _goingForward ? 
_backing.next() : _backing.prev(); var transformed = _transformer.apply(next.getValue()); if (transformed == null) continue; @@ -40,14 +66,27 @@ public class PredicateKvIterator, V, V_T> implements Clo } @Override - public K peekNextKey() { + protected void reverse() { + _goingForward = !_goingForward; + _next = null; + + if (_goingForward && _backing.hasNext()) + _backing.skip(); + else if (!_goingForward && _backing.hasPrev()) + _backing.skipPrev(); + + fillNext(); + } + + @Override + protected K peekImpl() { if (_next == null) throw new NoSuchElementException(); return _next.getKey(); } @Override - public void skip() { + protected void skipImpl() { if (_next == null) throw new NoSuchElementException(); _next = null; @@ -55,17 +94,12 @@ public class PredicateKvIterator, V, V_T> implements Clo } @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { + protected boolean hasImpl() { return _next != null; } @Override - public Pair next() { + protected Pair nextImpl() { if (_next == null) throw new NoSuchElementException("No more elements"); var ret = _next; @@ -74,6 +108,11 @@ public class PredicateKvIterator, V, V_T> implements Clo return ret; } + @Override + public void close() { + _backing.close(); + } + @Override public String toString() { return "PredicateKvIterator{" + diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java new file mode 100644 index 00000000..88b23f30 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversedKvIterator.java @@ -0,0 +1,61 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +public class ReversedKvIterator, V> implements CloseableKvIterator { + private final CloseableKvIterator _backing; + + public ReversedKvIterator(CloseableKvIterator backing) { + _backing = backing; + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public boolean hasNext() { + return _backing.hasPrev(); + } + + @Override + public Pair next() { + return _backing.prev(); + } + + @Override + public K peekNextKey() { + return _backing.peekPrevKey(); + } + + @Override + public void skip() { + _backing.skipPrev(); + } + + @Override + public K peekPrevKey() { + return _backing.peekNextKey(); + } + + @Override + public Pair prev() { + return _backing.next(); + } + + @Override + public boolean hasPrev() { + return _backing.hasNext(); + } + + @Override + public void skipPrev() { + _backing.skip(); + } + + @Override + public CloseableKvIterator reversed() { + return _backing; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java new file mode 100644 index 00000000..a13a063d --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/ReversibleKvIterator.java @@ -0,0 +1,79 @@ +package com.usatiuk.dhfs.objects; + +import org.apache.commons.lang3.tuple.Pair; + +public abstract class ReversibleKvIterator, V> implements CloseableKvIterator { + protected boolean _goingForward; + + protected abstract void reverse(); + + private void ensureForward() { + if (!_goingForward) { + reverse(); + } + } + + private void ensureBackward() { + if (_goingForward) { + reverse(); + } + } + + abstract protected K peekImpl(); + + abstract protected void skipImpl(); + + abstract protected boolean 
hasImpl(); + + abstract protected Pair nextImpl(); + + @Override + public K peekNextKey() { + ensureForward(); + return peekImpl(); + } + + @Override + public void skip() { + ensureForward(); + skipImpl(); + } + + + @Override + public boolean hasNext() { + ensureForward(); + return hasImpl(); + } + + @Override + public Pair next() { + ensureForward(); + return nextImpl(); + } + + @Override + public K peekPrevKey() { + ensureBackward(); + return peekImpl(); + } + + @Override + public Pair prev() { + ensureBackward(); + return nextImpl(); + } + + @Override + public boolean hasPrev() { + ensureBackward(); + return hasImpl(); + } + + @Override + public void skipPrev() { + ensureBackward(); + skipImpl(); + } + +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index f39a9dd1..1d0c1b98 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -170,6 +170,29 @@ public class CachingObjectPersistentStore { return _delegate.hasNext(); } + @Override + public JObjectKey peekPrevKey() { + return _delegate.peekPrevKey(); + } + + @Override + public Pair prev() { + var prev = _delegate.prev(); + Log.tracev("Caching: {0}", prev); + put(prev.getKey(), Optional.of(prev.getValue())); + return prev; + } + + @Override + public boolean hasPrev() { + return _delegate.hasPrev(); + } + + @Override + public void skipPrev() { + _delegate.skipPrev(); + } + @Override public Pair next() { var next = _delegate.next(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index bb1254f8..a38f964c 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.ReversibleKvIterator; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import io.quarkus.arc.properties.IfBuildProperty; import io.quarkus.logging.Log; @@ -92,7 +93,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { } } - private class LmdbKvIterator implements CloseableKvIterator { + private class LmdbKvIterator extends ReversibleKvIterator { private final Txn _txn = _env.txnRead(); private final Cursor _cursor = _db.openCursor(_txn); private boolean _hasNext = false; @@ -101,6 +102,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { private final MutableObject _closed = new MutableObject<>(false); LmdbKvIterator(IteratorStart start, JObjectKey key) { + _goingForward = true; var closedRef = _closed; CLEANER.register(this, () -> { if (!closedRef.getValue()) { @@ -125,6 +127,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { switch (start) { case LT -> { _hasNext = _cursor.prev(); + if (!_hasNext) { + _hasNext = _cursor.first(); + } } case GT -> { _hasNext = _cursor.next(); @@ -136,6 
+141,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { switch (start) { case LT, LE -> { _hasNext = _cursor.prev(); + if (!_hasNext) { + _hasNext = _cursor.first(); + } } case GT, GE -> { } @@ -147,10 +155,10 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { switch (start) { case LT -> { - assert !_hasNext || realGot.compareTo(key) < 0; +// assert !_hasNext || realGot.compareTo(key) < 0; } case LE -> { - assert !_hasNext || realGot.compareTo(key) <= 0; +// assert !_hasNext || realGot.compareTo(key) <= 0; } case GT -> { assert !_hasNext || realGot.compareTo(key) > 0; @@ -173,28 +181,25 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { } @Override - public boolean hasNext() { - return _hasNext; - } - - @Override - public Pair next() { - if (!_hasNext) { - throw new NoSuchElementException("No more elements"); + protected void reverse() { + if (_hasNext) { + if (_goingForward) { + _hasNext = _cursor.prev(); + } else { + _hasNext = _cursor.next(); + } + } else { + if (_goingForward) { + _hasNext = _cursor.last(); + } else { + _hasNext = _cursor.first(); + } } - var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val())); - _hasNext = _cursor.next(); - Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext); - return ret; + _goingForward = !_goingForward; } @Override - public void skip() { - _hasNext = _cursor.next(); - } - - @Override - public JObjectKey peekNextKey() { + protected JObjectKey peekImpl() { if (!_hasNext) { throw new NoSuchElementException("No more elements"); } @@ -202,6 +207,33 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { _cursor.key().flip(); return ret; } + + @Override + protected void skipImpl() { + if (_goingForward) + _hasNext = _cursor.next(); + else + _hasNext = _cursor.prev(); + } + + @Override + protected boolean hasImpl() { + return _hasNext; + } + + @Override + protected Pair nextImpl() { + if (!_hasNext) { + throw new NoSuchElementException("No more elements"); + } + var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), ByteString.copyFrom(_cursor.val())); + if (_goingForward) + _hasNext = _cursor.next(); + else + _hasNext = _cursor.prev(); + Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext); + return ret; + } } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index 2ccddadf..a38604db 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -1,10 +1,6 @@ package com.usatiuk.dhfs.objects.persistence; -import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.objects.CloseableKvIterator; -import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -import com.usatiuk.dhfs.objects.JObjectKey; -import com.usatiuk.dhfs.objects.ObjectSerializer; +import com.usatiuk.dhfs.objects.*; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; @@ -31,48 +27,10 @@ public class SerializingObjectPersistentStore { return delegateStore.readObject(name).map(serializer::deserialize); } - private class SerializingKvIterator implements CloseableKvIterator { - private final CloseableKvIterator 
_delegate; - - private SerializingKvIterator(IteratorStart start, JObjectKey key) { - _delegate = delegateStore.getIterator(start, key); - } - - @Override - public JObjectKey peekNextKey() { - return _delegate.peekNextKey(); - } - - @Override - public void skip() { - _delegate.skip(); - } - - @Override - public void close() { - _delegate.close(); - } - - @Override - public boolean hasNext() { - return _delegate.hasNext(); - } - - @Override - public Pair next() { - var next = _delegate.next(); - return Pair.of(next.getKey(), serializer.deserialize(next.getValue())); - } - } - // Returns an iterator with a view of all committed objects // Does not have to guarantee a consistent view, snapshots are handled by upper layers public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new SerializingKvIterator(start, key); - } - - public CloseableKvIterator getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); + return new MappingKvIterator<>(delegateStore.getIterator(start, key), d -> serializer.deserialize(d)); } public TxManifestRaw prepareManifest(TxManifestObj names) { diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java index 5bb49752..1cdefc96 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java @@ -1,5 +1,5 @@ package com.usatiuk.dhfs.objects.snapshot; -interface SnapshotEntry { +public interface SnapshotEntry { long whenToRemove(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java index bd838c17..3b0dbd6f 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java @@ -1,4 +1,4 @@ package com.usatiuk.dhfs.objects.snapshot; -record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { +public record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java index f4d7e40d..78036e17 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java @@ -2,5 +2,5 @@ package com.usatiuk.dhfs.objects.snapshot; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; -record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { +public record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java index 9fd6e80f..dd8a5f07 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKey.java @@ -5,7 +5,7 @@ import com.usatiuk.dhfs.objects.JObjectKey; 
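 // Note: ordering is by object key first, then by version (see compareTo below),
 // so all versions of a given object sit adjacent in a sorted snapshot map.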
import javax.annotation.Nonnull; import java.util.Comparator; -record SnapshotKey(JObjectKey key, long version) implements Comparable { +public record SnapshotKey(JObjectKey key, long version) implements Comparable { @Override public int compareTo(@Nonnull SnapshotKey o) { return Comparator.comparing(SnapshotKey::key) diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java index 4c711c85..0c78cd79 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/Just.java @@ -1,6 +1,10 @@ package com.usatiuk.dhfs.objects; +import org.junit.jupiter.api.Assertions; + import java.util.Arrays; +import java.util.Iterator; +import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.Executors; @@ -60,4 +64,16 @@ public abstract class Just { } } + public static void checkIterator(Iterator it, List expected) { + for (var e : expected) { + Assertions.assertTrue(it.hasNext()); + var next = it.next(); + Assertions.assertEquals(e, next); + } + } + + @SafeVarargs + public static void checkIterator(Iterator it, K... expected) { + checkIterator(it, Arrays.asList(expected)); + } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java index 89ebbcf1..63f25100 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -146,6 +146,11 @@ public class MergingKvIteratorTest { Assertions.assertEquals(pair, mergingIterator.next()); } Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator.reversed().hasNext()); + Just.checkIterator(mergingIterator, Pair.of(1,3), Pair.of(2, 4), Pair.of(5, 6)); + Assertions.assertFalse(mergingIterator.hasNext()); + var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); var expected2 = List.of(Pair.of(5, 6)); @@ -154,6 +159,16 @@ public class MergingKvIteratorTest { Assertions.assertEquals(pair, mergingIterator2.next()); } Assertions.assertFalse(mergingIterator2.hasNext()); + Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator2.reversed().hasNext()); + Just.checkIterator(mergingIterator2, Pair.of(1,3), Pair.of(2, 5), Pair.of(5, 6)); + Assertions.assertFalse(mergingIterator2.hasNext()); + + var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); } @Test @@ -180,6 +195,10 @@ public class MergingKvIteratorTest { Assertions.assertEquals(pair, mergingIterator.next()); } Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(6, 8), Pair.of(5, 
6), Pair.of(2, 4), Pair.of(1, 3)); + Assertions.assertFalse(mergingIterator.reversed().hasNext()); + Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6), Pair.of(6, 8)); + Assertions.assertFalse(mergingIterator.hasNext()); var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8)); @@ -188,6 +207,12 @@ public class MergingKvIteratorTest { Assertions.assertEquals(pair, mergingIterator2.next()); } Assertions.assertFalse(mergingIterator2.hasNext()); + + var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); + Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); } @Test @@ -264,6 +289,8 @@ public class MergingKvIteratorTest { Assertions.assertEquals(pair, mergingIterator.next()); } Assertions.assertFalse(mergingIterator.hasNext()); + Just.checkIterator(mergingIterator.reversed(), Pair.of(4, 6), Pair.of(3, 5), Pair.of(1, 3)); + Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6)); var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK)); var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6)); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java new file mode 100644 index 00000000..de7a5666 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/NavigableMapKvIteratorTest.java @@ -0,0 +1,71 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.NavigableMap; + +public class NavigableMapKvIteratorTest { + private final NavigableMap _testMap1 = TreePMap.empty().plus(1, 2).plus(2, 3).plus(3, 4); + + @Test + void test1() { + var iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 3); + Just.checkIterator(iterator, Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 2); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 2); + Just.checkIterator(iterator, Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 3); + Just.checkIterator(iterator, Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 2); + 
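+        // LT with an existing lower key positions at the greatest key strictly
+        // below the start (here 1), then iterates forward over the rest: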
Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LT, 1); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 1); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 3); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GT, 4); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.LE, 0); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertFalse(iterator.hasNext()); + + iterator = new NavigableMapKvIterator<>(_testMap1, IteratorStart.GE, 2); + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(2, iterator.peekNextKey()); + Assertions.assertEquals(1, iterator.peekPrevKey()); + Assertions.assertEquals(2, iterator.peekNextKey()); + Assertions.assertEquals(1, iterator.peekPrevKey()); + Just.checkIterator(iterator.reversed(), Pair.of(1, 2)); + Just.checkIterator(iterator, Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4)); + Assertions.assertEquals(Pair.of(3, 4), iterator.prev()); + Assertions.assertEquals(Pair.of(2, 3), iterator.prev()); + Assertions.assertEquals(Pair.of(2, 3), iterator.next()); + } + +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java index 44c1daa0..3cf41813 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java @@ -34,4 +34,123 @@ public class PredicateKvIteratorTest { } Assertions.assertFalse(pit.hasNext()); } + + @Test + public void ltTest2() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 1), + IteratorStart.LT, 1, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 2), + IteratorStart.LT, 2, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 4), + IteratorStart.LE, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6)); + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest3() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 7).plus(6, 8); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? 
v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8), + IteratorStart.LT, 8, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6), + IteratorStart.LE, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(4, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(4, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(Pair.of(4, 6), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + } + + @Test + public void ltTest4() { + var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 8).plus(6, 10); + var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(5, 8), Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null); + Just.checkIterator(pit, Pair.of(6, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0) ? 
v : null); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(5, pit.peekNextKey()); + Assertions.assertTrue(pit.hasPrev()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(5, pit.peekNextKey()); + Assertions.assertEquals(4, pit.peekPrevKey()); + Assertions.assertEquals(Pair.of(5, 8), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(5, pit.peekPrevKey()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(5, pit.peekPrevKey()); + } + +// @Test +// public void reverseTest() { +// var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); +// var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), +// IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); +// +// } } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java new file mode 100644 index 00000000..16c37789 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TempDataProfile.java @@ -0,0 +1,30 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.test.junit.QuarkusTestProfile; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +public class TempDataProfile implements QuarkusTestProfile { + protected void getConfigOverrides(Map toPut) { + } + + @Override + final public Map getConfigOverrides() { + Path tempDirWithPrefix; + try { + tempDirWithPrefix = Files.createTempDirectory("dhfs-test"); + } catch (IOException e) { + throw new RuntimeException(e); + } + var ret = new HashMap(); + ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); + ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); + ret.put("dhfs.objects.persistence", "lmdb"); + getConfigOverrides(ret); + return ret; + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java new file mode 100644 index 00000000..957373ec --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/TestDataCleaner.java @@ -0,0 +1,40 @@ +package com.usatiuk.dhfs.objects; + +import io.quarkus.logging.Log; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import org.eclipse.microprofile.config.inject.ConfigProperty; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Objects; + +@ApplicationScoped +public class TestDataCleaner { + @ConfigProperty(name = "dhfs.objects.persistence.files.root") + String tempDirectory; + + void init(@Observes @Priority(1) StartupEvent event) throws IOException { + try { + purgeDirectory(Path.of(tempDirectory).toFile()); + } catch (Exception ignored) { + Log.warn("Couldn't cleanup test data on init"); + } + } + + void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { + purgeDirectory(Path.of(tempDirectory).toFile()); + } + + void purgeDirectory(File dir) { + for (File file : Objects.requireNonNull(dir.listFiles())) { + if (file.isDirectory()) + purgeDirectory(file); + file.delete(); + 
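+            // File.delete() returns false rather than throwing on failure, so
+            // cleanup stays best-effort, like the ignored failures in init().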
} + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java new file mode 100644 index 00000000..e6baa8fa --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java @@ -0,0 +1,106 @@ +package com.usatiuk.dhfs.objects.persistence; + + +import com.google.protobuf.ByteString; +import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.Just; +import com.usatiuk.dhfs.objects.TempDataProfile; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; +import jakarta.inject.Inject; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.List; + +@QuarkusTest +@TestProfile(TempDataProfile.class) +public class LmdbKvIteratorTest { + + @Inject + LmdbObjectPersistentStore store; + + @Test + public void iteratorTest1() { + store.commitTx( + new TxManifestRaw( + List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), + Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), + Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))), + List.of() + ) + ); + + var iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), 
Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertFalse(iterator.hasNext()); + iterator.close(); + + iterator = store.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2))); + Assertions.assertTrue(iterator.hasNext()); + Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey()); + Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey()); + Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2}))); + Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})), iterator.prev()); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.prev()); + Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.next()); + iterator.close(); + + store.commitTx(new TxManifestRaw( + List.of(), + List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3))) + )); + } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java new file mode 100644 index 00000000..e4527110 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIteratorTest.java @@ -0,0 +1,11 @@ +package com.usatiuk.dhfs.objects.snapshot; + +import com.usatiuk.dhfs.objects.JObjectKey; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +public class SnapshotKvIteratorTest { + + +} From 75fec73b39445d6016cc8689549f2daa035af7f2 Mon Sep 17 00:00:00 2001 From: Stepan Usatiuk Date: Sat, 8 Mar 2025 00:35:56 +0100 Subject: [PATCH 105/105] reverse iterators and a bunch of fixes with 
lt/gt iterator start --- .../dhfs/objects/CloseableKvIterator.java | 16 +- .../java/com/usatiuk/dhfs/objects/Data.java | 10 + .../InconsistentKvIteratorWrapper.java | 134 ---------- .../InconsistentSelfRefreshingKvIterator.java | 148 ----------- .../objects/InvalidIteratorException.java | 11 - .../dhfs/objects/InvalidatableKvIterator.java | 77 ------ .../com/usatiuk/dhfs/objects/JDataDummy.java | 28 --- .../usatiuk/dhfs/objects/JObjectManager.java | 35 +-- .../dhfs/objects/KeyPredicateKvIterator.java | 129 ++++++++++ .../usatiuk/dhfs/objects/MaybeTombstone.java | 7 + .../dhfs/objects/MergingKvIterator.java | 28 ++- .../dhfs/objects/PredicateKvIterator.java | 12 +- .../objects/SelfRefreshingKvIterator.java | 125 ---------- .../dhfs/objects/StaleIteratorException.java | 11 - .../com/usatiuk/dhfs/objects/Tombstone.java | 10 + .../objects/TombstoneMergingKvIterator.java | 35 ++- .../com/usatiuk/dhfs/objects/TxBundle.java | 9 - .../WritebackObjectPersistentStore.java | 97 ++++---- .../CachingObjectPersistentStore.java | 150 ++++++------ .../LmdbObjectPersistentStore.java | 73 +++++- .../MemoryObjectPersistentStore.java | 25 +- .../persistence/ObjectPersistentStore.java | 13 +- .../SerializingObjectPersistentStore.java | 13 +- .../dhfs/objects/snapshot/SnapshotEntry.java | 2 + .../snapshot/SnapshotEntryDeleted.java | 4 + .../objects/snapshot/SnapshotEntryObject.java | 4 + .../objects/snapshot/SnapshotKvIterator.java | 120 +++++++-- .../objects/snapshot/SnapshotManager.java | 231 +++++++++--------- .../ReadTrackingObjectSourceFactory.java | 22 ++ .../transaction/TransactionFactory.java | 2 +- .../transaction/TransactionFactoryImpl.java | 17 +- .../objects/KeyPredicateKvIteratorTest.java | 154 ++++++++++++ .../dhfs/objects/MergingKvIteratorTest.java | 28 ++- .../dhfs/objects/ObjectsTestExtraChecks.java | 2 +- .../usatiuk/dhfs/objects/ObjectsTestImpl.java | 53 +++- .../dhfs/objects/PreCommitTxHookTest.java | 2 + .../dhfs/objects/PredicateKvIteratorTest.java | 7 +- .../persistence/LmdbKvIteratorTest.java | 21 +- .../dhfs/objects/jmap/JMapIterator.java | 21 ++ 39 files changed, 967 insertions(+), 919 deletions(-) create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java create mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java delete mode 100644 dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java create mode 100644 dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java diff --git 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java index 13b439af..7014f8a2 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/CloseableKvIterator.java @@ -10,21 +10,13 @@ public interface CloseableKvIterator, V> extends Iterato void skip(); - default K peekPrevKey() { - throw new UnsupportedOperationException(); - } + K peekPrevKey(); - default Pair prev() { - throw new UnsupportedOperationException(); - } + Pair prev(); - default boolean hasPrev() { - throw new UnsupportedOperationException(); - } + boolean hasPrev(); - default void skipPrev() { - throw new UnsupportedOperationException(); - } + void skipPrev(); default CloseableKvIterator reversed() { return new ReversedKvIterator<>(this); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java new file mode 100644 index 00000000..b1f7bcb7 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Data.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import java.util.Optional; + +public record Data(V value) implements MaybeTombstone { + @Override + public Optional opt() { + return Optional.of(value); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java deleted file mode 100644 index d3da5bfa..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentKvIteratorWrapper.java +++ /dev/null @@ -1,134 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import com.usatiuk.dhfs.objects.persistence.IteratorStart; -import io.quarkus.logging.Log; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.NoSuchElementException; -import java.util.function.Function; - -// Also checks that the next provided item is always consistent after a refresh -public class InconsistentKvIteratorWrapper, V> implements CloseableKvIterator { - private CloseableKvIterator _backing; - private final Function, CloseableKvIterator> _iteratorSupplier; - private K _lastReturnedKey = null; - private K _peekedKey = null; - private boolean _peekedNext = false; - private final Pair _initialStart; - - public InconsistentKvIteratorWrapper(Function, CloseableKvIterator> iteratorSupplier, IteratorStart start, K key) { - _iteratorSupplier = iteratorSupplier; - _initialStart = Pair.of(start, key); - while (true) { - try { - _backing = _iteratorSupplier.apply(Pair.of(start, key)); - break; - } catch (StaleIteratorException ignored) { - continue; - } - } - } - - private void refresh() { - Log.tracev("Refreshing iterator: {0}", _backing); - _backing.close(); - if (_peekedKey != null) { - _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); - if (!_backing.hasNext() || !_backing.peekNextKey().equals(_peekedKey)) { - assert false; - } - } else if (_lastReturnedKey != null) { - _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey)); - } else { - _backing = _iteratorSupplier.apply(_initialStart); - } - - if (_peekedNext && !_backing.hasNext()) { - assert false; - } - } - - @Override - public K peekNextKey() { - while (true) { - if (_peekedKey != null) { - return _peekedKey; - } - try { - _peekedKey = 
_backing.peekNextKey(); - assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; - } catch (NoSuchElementException ignored) { - assert !_peekedNext; - throw ignored; - } catch (StaleIteratorException ignored) { - refresh(); - continue; - } - _peekedNext = true; - Log.tracev("Peeked key: {0}", _peekedKey); - return _peekedKey; - } - } - - @Override - public void skip() { - while (true) { - try { - _lastReturnedKey = _backing.peekNextKey(); - _backing.skip(); - _peekedNext = false; - _peekedKey = null; - return; - } catch (NoSuchElementException ignored) { - assert !_peekedNext; - throw ignored; - } catch (StaleIteratorException ignored) { - refresh(); - continue; - } - } - } - - @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { - while (true) { - if (_peekedNext) { - return true; - } - try { - _peekedNext = _backing.hasNext(); - Log.tracev("Peeked next: {0}", _peekedNext); - return _peekedNext; - } catch (StaleIteratorException ignored) { - refresh(); - continue; - } - } - } - - @Override - public Pair next() { - while (true) { - try { - var got = _backing.next(); - assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; - _peekedNext = false; - _peekedKey = null; - _lastReturnedKey = got.getKey(); - return got; - } catch (NoSuchElementException ignored) { - assert !_peekedNext; - throw ignored; - } catch (StaleIteratorException ignored) { - refresh(); - continue; - } - } - } - -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java deleted file mode 100644 index c503ad1a..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InconsistentSelfRefreshingKvIterator.java +++ /dev/null @@ -1,148 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import com.usatiuk.dhfs.objects.persistence.IteratorStart; -import io.quarkus.logging.Log; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.concurrent.locks.Lock; -import java.util.function.Function; -import java.util.function.Supplier; - -// Also checks that the next provided item is always consistent after a refresh -public class InconsistentSelfRefreshingKvIterator, V> implements CloseableKvIterator { - private CloseableKvIterator _backing; - private long _curVersion = -1L; - private final Lock _lock; - private final Function, CloseableKvIterator> _iteratorSupplier; - private final Supplier _versionSupplier; - private K _lastReturnedKey = null; - private K _peekedKey = null; - private boolean _peekedNext = false; - private final Pair _initialStart; - - public InconsistentSelfRefreshingKvIterator(Function, CloseableKvIterator> iteratorSupplier, Supplier versionSupplier, Lock lock, - IteratorStart start, K key) { - _iteratorSupplier = iteratorSupplier; - _versionSupplier = versionSupplier; - _lock = lock; - _initialStart = Pair.of(start, key); - - _lock.lock(); - try { - long curVersion = _versionSupplier.get(); - _backing = _iteratorSupplier.apply(Pair.of(start, key)); - _curVersion = curVersion; - } finally { - _lock.unlock(); - } - } - - private void maybeRefresh() { - _lock.lock(); - CloseableKvIterator oldBacking = null; - try { - if (_versionSupplier.get() == _curVersion) { - return; - } - long newVersion = _versionSupplier.get(); - oldBacking = _backing; - if (_peekedKey != null) { - _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _peekedKey)); - 
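The four reverse-navigation methods that this patch promotes from optional defaults to required members of CloseableKvIterator are what make reversed() work over any implementation. Below is a minimal sketch of that bidirectional contract over a NavigableMap; ToyKvIterator and its fields are hypothetical and are not the project's NavigableMapKvIterator. Note the ListIterator-like symmetry: after prev() returns a key, next() returns that same key again, which is exactly what the iterator test above exercises.

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map;
import java.util.NavigableMap;
import java.util.NoSuchElementException;

class ToyKvIterator<K extends Comparable<K>, V> {
    private final NavigableMap<K, V> map;
    private K cursor; // key that next() would return; null == past the last entry

    ToyKvIterator(NavigableMap<K, V> map, K startInclusive) {
        this.map = map;
        this.cursor = map.ceilingKey(startInclusive); // GE positioning
    }

    boolean hasNext() { return cursor != null; }

    boolean hasPrev() {
        return cursor == null ? !map.isEmpty() : map.lowerKey(cursor) != null;
    }

    K peekNextKey() {
        if (cursor == null) throw new NoSuchElementException();
        return cursor;
    }

    K peekPrevKey() {
        K prev = cursor == null ? (map.isEmpty() ? null : map.lastKey()) : map.lowerKey(cursor);
        if (prev == null) throw new NoSuchElementException();
        return prev;
    }

    Map.Entry<K, V> next() {
        K k = peekNextKey();
        cursor = map.higherKey(k); // step forward
        return new SimpleImmutableEntry<>(k, map.get(k));
    }

    Map.Entry<K, V> prev() {
        K k = peekPrevKey();
        cursor = k; // step back: the returned key becomes next()'s key again
        return new SimpleImmutableEntry<>(k, map.get(k));
    }
}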
if (!_backing.hasNext() || !_backing.peekNextKey().equals(_peekedKey)) { - throw new StaleIteratorException(); - } - } else if (_lastReturnedKey != null) { - _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GT, _lastReturnedKey)); - if (_backing.hasNext() && !(_backing.peekNextKey().compareTo(_lastReturnedKey) > 0)) { - throw new StaleIteratorException(); - } - } else { - _backing = _iteratorSupplier.apply(_initialStart); - } - - if (_peekedNext && !_backing.hasNext()) { - throw new StaleIteratorException(); - } - - Log.tracev("Refreshed iterator last refreshed {0}, current version {1}", - _curVersion, newVersion); - - _curVersion = newVersion; - } finally { - _lock.unlock(); - if (oldBacking != null) { - oldBacking.close(); - } - } - } - - @Override - public K peekNextKey() { - if (_peekedKey != null) { - return _peekedKey; - } - _lock.lock(); - try { - maybeRefresh(); - _peekedKey = _backing.peekNextKey(); - assert _lastReturnedKey == null || _peekedKey.compareTo(_lastReturnedKey) > 0; - _peekedNext = true; - Log.tracev("Peeked key: {0}", _peekedKey); - return _peekedKey; - } finally { - _lock.unlock(); - } - } - - @Override - public void skip() { - _lock.lock(); - try { - maybeRefresh(); - _lastReturnedKey = _backing.peekNextKey(); - _backing.skip(); - _peekedNext = false; - _peekedKey = null; - } finally { - _lock.unlock(); - } - } - - @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { - if (_peekedNext) { - return true; - } - _lock.lock(); - try { - maybeRefresh(); - _peekedNext = _backing.hasNext(); - Log.tracev("Peeked next: {0}", _peekedNext); - return _peekedNext; - } finally { - _lock.unlock(); - } - } - - @Override - public Pair next() { - _lock.lock(); - try { - maybeRefresh(); - var got = _backing.next(); - assert _lastReturnedKey == null || got.getKey().compareTo(_lastReturnedKey) > 0; - _peekedNext = false; - _peekedKey = null; - _lastReturnedKey = got.getKey(); - return got; - } finally { - _lock.unlock(); - } - } - -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java deleted file mode 100644 index fa2bb988..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidIteratorException.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.objects; - -public class InvalidIteratorException extends RuntimeException { - public InvalidIteratorException() { - super(); - } - - public InvalidIteratorException(String message) { - super(message); - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java deleted file mode 100644 index a83b36a4..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/InvalidatableKvIterator.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import io.quarkus.logging.Log; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.concurrent.locks.Lock; -import java.util.function.Supplier; - -public class InvalidatableKvIterator, V> implements CloseableKvIterator { - private final CloseableKvIterator _backing; - private final Supplier _versionSupplier; - private final long _version; - private final Lock _lock; - - public InvalidatableKvIterator(CloseableKvIterator backing, Supplier versionSupplier, Lock lock) { - _backing = backing; - 
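All three deleted classes (InconsistentKvIteratorWrapper, InconsistentSelfRefreshingKvIterator, InvalidatableKvIterator) existed to detect or repair iterators whose backing store changed mid-iteration. The patch's replacement strategy is to hand each iterator an immutable view to begin with, so there is nothing to refresh or invalidate. A sketch of the idea using pcollections' TreePMap, which the caching store below switches to; the class here is illustrative only:

import org.pcollections.TreePMap;

public class SnapshotViewSketch {
    public static void main(String[] args) {
        TreePMap<String, Integer> v1 = TreePMap.<String, Integer>empty().plus("a", 1).plus("b", 2);
        TreePMap<String, Integer> v2 = v1.plus("c", 3).minus("a"); // a later "commit"; v1 is untouched
        // Anything iterating over v1 stays valid and self-consistent no matter
        // how many commits happen afterwards: no version checks, no refresh,
        // no StaleIteratorException.
        v1.forEach((k, v) -> System.out.println(k + "=" + v)); // a=1, b=2
        System.out.println(v2.containsKey("a"));                // false
    }
}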
_versionSupplier = versionSupplier; - _lock = lock; - _version = _versionSupplier.get(); - } - - private void checkVersion() { - if (_versionSupplier.get() != _version) { - Log.errorv("Version mismatch: {0} != {1}", _versionSupplier.get(), _version); - throw new InvalidIteratorException(); - } - } - - @Override - public K peekNextKey() { - _lock.lock(); - try { - checkVersion(); - return _backing.peekNextKey(); - } finally { - _lock.unlock(); - } - } - - @Override - public void skip() { - _lock.lock(); - try { - checkVersion(); - _backing.skip(); - } finally { - _lock.unlock(); - } - } - - @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { - _lock.lock(); - try { - checkVersion(); - return _backing.hasNext(); - } finally { - _lock.unlock(); - } - } - - @Override - public Pair next() { - _lock.lock(); - try { - checkVersion(); - return _backing.next(); - } finally { - _lock.unlock(); - } - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java deleted file mode 100644 index cbc3dc29..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JDataDummy.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.usatiuk.dhfs.objects; - -public class JDataDummy implements JData { - public static final JObjectKey TX_ID_OBJ_NAME = JObjectKey.of("tx_id"); - private static final JDataDummy INSTANCE = new JDataDummy(); - - public static JDataDummy getInstance() { - return INSTANCE; - } - - @Override - public JObjectKey key() { - return TX_ID_OBJ_NAME; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - return true; - } - - // hashCode - @Override - public int hashCode() { - return 0; - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java index 6aedd59a..37e6798d 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/JObjectManager.java @@ -12,7 +12,6 @@ import jakarta.enterprise.inject.Instance; import jakarta.inject.Inject; import java.util.*; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Stream; @@ -24,7 +23,6 @@ import java.util.stream.Stream; @ApplicationScoped public class JObjectManager { private final List _preCommitTxHooks; - private final AtomicLong _txCounter = new AtomicLong(); private boolean _ready = false; @Inject SnapshotManager snapshotManager; @@ -38,10 +36,6 @@ public class JObjectManager { } void init(@Observes @Priority(200) StartupEvent event) { - var read = snapshotManager.readObjectDirect(JDataDummy.TX_ID_OBJ_NAME).orElse(null); - if (read != null) { - _txCounter.set(read.version()); - } _ready = true; } @@ -51,14 +45,9 @@ public class JObjectManager { public TransactionPrivate createTransaction() { verifyReady(); - while (true) { - try { - var tx = transactionFactory.createTransaction(_txCounter.get()); - Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id()); - return tx; - } catch (SnapshotManager.IllegalSnapshotIdException ignored) { - } - } + var tx = transactionFactory.createTransaction(); + Log.tracev("Created transaction with snapshotId={0}", tx.snapshot().id()); + 
return tx; } public TransactionHandle commit(TransactionPrivate tx) { @@ -102,10 +91,6 @@ public class JObjectManager { Log.trace("Commit iteration with " + currentIteration.size() + " records for hook " + hook.getClass()); for (var entry : currentIteration.entrySet()) { - // FIXME: Kinda hack? - if (entry.getKey().equals(JDataDummy.TX_ID_OBJ_NAME)) { - continue; - } somethingChanged = true; Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey()); var oldObj = getCurrent.apply(entry.getKey()); @@ -150,14 +135,9 @@ public class JObjectManager { } } } - - Log.trace("Committing transaction start"); - // FIXME: Better way? - addDependency.accept(JDataDummy.TX_ID_OBJ_NAME); - writes.put(JDataDummy.TX_ID_OBJ_NAME, new TxRecord.TxObjectRecordWrite<>(JDataDummy.getInstance())); + Log.trace("Committing transaction start"); var snapshotId = tx.snapshot().id(); - var newId = _txCounter.get() + 1; for (var read : readSet.entrySet()) { var dep = dependenciesLocked.get(read.getKey()); @@ -182,7 +162,6 @@ public class JObjectManager { Log.trace("Checking dependency " + read.getKey() + " - ok with read"); } - Log.tracef("Committing transaction %d to storage", newId); var addFlushCallback = snapshotManager.commitTx( writes.values().stream() .filter(r -> { @@ -194,11 +173,7 @@ public class JObjectManager { } } return true; - }).toList(), - newId); - - var realNewId = _txCounter.getAndIncrement() + 1; - assert realNewId == newId; + }).toList()); for (var callback : tx.getOnCommit()) { callback.run(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java new file mode 100644 index 00000000..b43308d2 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/KeyPredicateKvIterator.java @@ -0,0 +1,129 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.NoSuchElementException; +import java.util.function.Function; + +public class KeyPredicateKvIterator, V> extends ReversibleKvIterator { + private final CloseableKvIterator _backing; + private final Function _filter; + private K _next; + + public KeyPredicateKvIterator(CloseableKvIterator backing, IteratorStart start, K startKey, Function filter) { + _goingForward = true; + _backing = backing; + _filter = filter; + fillNext(); + + boolean shouldGoBack = false; + if (start == IteratorStart.LE) { + if (_next == null || _next.compareTo(startKey) > 0) { + shouldGoBack = true; + } + } else if (start == IteratorStart.LT) { + if (_next == null || _next.compareTo(startKey) >= 0) { + shouldGoBack = true; + } + } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _next = null; + fillNext(); + if (_next != null) + _backing.skipPrev(); + _goingForward = true; +// _backing.skip(); + fillNext(); + } + + + switch (start) { + case LT -> { +// assert _next == null || _next.getKey().compareTo(startKey) < 0; + } + case LE -> { +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; + } + case GT -> { + assert _next == null || _next.compareTo(startKey) > 0; + } + case GE -> { + assert _next == null || _next.compareTo(startKey) >= 0; + } + } + } + + private void fillNext() { + while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) { + var next = _goingForward ? 
_backing.peekNextKey() : _backing.peekPrevKey(); + if (!_filter.apply(next)) { + if (_goingForward) + _backing.skip(); + else + _backing.skipPrev(); + continue; + } + _next = next; + } + } + + @Override + protected void reverse() { + _goingForward = !_goingForward; + _next = null; + + fillNext(); + } + + @Override + protected K peekImpl() { + if (_next == null) + throw new NoSuchElementException(); + return _next; + } + + @Override + protected void skipImpl() { + if (_next == null) + throw new NoSuchElementException(); + _next = null; + if (_goingForward) + _backing.skip(); + else + _backing.skipPrev(); + fillNext(); + } + + @Override + protected boolean hasImpl() { + return _next != null; + } + + @Override + protected Pair nextImpl() { + if (_next == null) + throw new NoSuchElementException("No more elements"); + var retKey = _next; + _next = null; + var got = _goingForward ? _backing.next() : _backing.prev(); + assert got.getKey().equals(retKey); + fillNext(); + return got; + } + + @Override + public void close() { + _backing.close(); + } + + @Override + public String toString() { + return "KeyPredicateKvIterator{" + + "_backing=" + _backing + + ", _next=" + _next + + '}'; + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java new file mode 100644 index 00000000..f6d47c71 --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MaybeTombstone.java @@ -0,0 +1,7 @@ +package com.usatiuk.dhfs.objects; + +import java.util.Optional; + +public interface MaybeTombstone { + Optional opt(); +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java index a4f193c6..78c8e482 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/MergingKvIterator.java @@ -5,6 +5,7 @@ import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; import java.util.*; +import java.util.stream.Collectors; public class MergingKvIterator, V> extends ReversibleKvIterator { private final Map, Integer> _iterators; @@ -22,16 +23,22 @@ public class MergingKvIterator, V> extends ReversibleKvI // Starting at the greatest key less than / less than or equal to the start key: // We have a bunch of iterators that have given us their "greatest LT/LE key" // now we need to pick the greatest of those to start with + // But if some of them don't have a lesser key, we need to pick the smallest of those var initialIterators = iterators.stream().map(p -> p.get(initialStartType, initialStartKey)).toList(); try { - K initialMaxValue = initialIterators.stream() + IteratorStart finalStartType = startType; + var found = initialIterators.stream() .filter(CloseableKvIterator::hasNext) .map((i) -> { var peeked = i.peekNextKey(); // Log.warnv("peeked: {0}, from {1}", peeked, i.getClass()); return peeked; - }) - .max(Comparator.naturalOrder()).orElse(null); + }).distinct().collect(Collectors.partitioningBy(e -> finalStartType == IteratorStart.LE ? 
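KeyPredicateKvIterator filters by key before any value is touched and keeps the LT/LE/GT/GE positioning correct over the surviving keys. A hedged usage sketch (backing and startKey are assumed to be in scope; the prefix predicate is a variant of how the LMDB store below hides its internal version record):

// Skip internal bookkeeping keys while iterating; GE positioning is
// preserved over the keys that pass the filter.
CloseableKvIterator<JObjectKey, ByteString> visible =
        new KeyPredicateKvIterator<>(backing, IteratorStart.GE, startKey,
                k -> !k.name().startsWith("__"));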
e.compareTo(initialStartKey) <= 0 : e.compareTo(initialStartKey) < 0)); + K initialMaxValue; + if (!found.get(true).isEmpty()) + initialMaxValue = found.get(true).stream().max(Comparator.naturalOrder()).orElse(null); + else + initialMaxValue = found.get(false).stream().min(Comparator.naturalOrder()).orElse(null); if (initialMaxValue == null) { fail = true; } @@ -61,12 +68,12 @@ public class MergingKvIterator, V> extends ReversibleKvI Log.tracev("{0} Created: {1}", _name, _sortedIterators); switch (initialStartType) { - case LT -> { - assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0; - } - case LE -> { - assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0; - } +// case LT -> { +// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0; +// } +// case LE -> { +// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0; +// } case GT -> { assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) > 0; } @@ -88,6 +95,7 @@ public class MergingKvIterator, V> extends ReversibleKvI } K key = iterator.peekNextKey(); + Log.tracev("{0} Advance peeked: {1}-{2}", _name, iterator, key); if (!_sortedIterators.containsKey(key)) { _sortedIterators.put(key, iterator); return; @@ -110,6 +118,7 @@ public class MergingKvIterator, V> extends ReversibleKvI @Override protected void reverse() { var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry(); + Log.tracev("{0} Reversing from {1}", _name, cur); _goingForward = !_goingForward; _sortedIterators.clear(); for (CloseableKvIterator iterator : _iterators.keySet()) { @@ -126,6 +135,7 @@ public class MergingKvIterator, V> extends ReversibleKvI || (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) { skipImpl(); } + Log.tracev("{0} Reversed to {1}", _name, _sortedIterators); } @Override diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java index 22b6dbe8..cfe85ffa 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/PredicateKvIterator.java @@ -1,6 +1,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import io.quarkus.logging.Log; import org.apache.commons.lang3.tuple.Pair; import java.util.NoSuchElementException; @@ -68,13 +69,18 @@ public class PredicateKvIterator, V, V_T> extends Revers @Override protected void reverse() { _goingForward = !_goingForward; - _next = null; + boolean wasAtEnd = _next == null; - if (_goingForward && _backing.hasNext()) + if (_goingForward && !wasAtEnd) _backing.skip(); - else if (!_goingForward && _backing.hasPrev()) + else if (!_goingForward && !wasAtEnd) _backing.skipPrev(); + if (!wasAtEnd) + Log.tracev("Skipped in reverse: {0}", _next); + + _next = null; + fillNext(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java deleted file mode 100644 index 1ce8dd05..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/SelfRefreshingKvIterator.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.usatiuk.dhfs.objects; - -import 
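The new LE/LT start-key selection deserves a worked example. Suppose three backing iterators, each positioned with LE 5, report head keys 4, 7 and 2 (7 because that iterator had nothing at or below 5). The merge must start at 4, the greatest head that actually satisfies LE 5, and fall back to the smallest greater head only when no iterator had one. A standalone sketch of the same partitioningBy logic:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class LeStartSelection {
    public static void main(String[] args) {
        int startKey = 5;
        List<Integer> heads = List.of(4, 7, 2); // heads reported by the backing iterators
        var split = heads.stream().distinct()
                .collect(Collectors.partitioningBy(k -> k <= startKey));
        Integer initial = !split.get(true).isEmpty()
                ? split.get(true).stream().max(Comparator.naturalOrder()).orElseThrow()  // greatest LE head
                : split.get(false).stream().min(Comparator.naturalOrder()).orElseThrow(); // fallback: smallest GT head
        System.out.println(initial); // prints 4
    }
}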
com.usatiuk.dhfs.objects.persistence.IteratorStart; -import io.quarkus.logging.Log; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.NoSuchElementException; -import java.util.concurrent.locks.Lock; -import java.util.function.Function; -import java.util.function.Supplier; - -// Also checks that the next provided item is always consistent after a refresh -public class SelfRefreshingKvIterator, V> implements CloseableKvIterator { - private CloseableKvIterator _backing; - private long _curVersion = -1L; - private final Lock _lock; - private final Function, CloseableKvIterator> _iteratorSupplier; - private final Supplier _versionSupplier; - private Pair _next; - - public SelfRefreshingKvIterator(Function, CloseableKvIterator> iteratorSupplier, Supplier versionSupplier, Lock lock, - IteratorStart start, K key) { - _iteratorSupplier = iteratorSupplier; - _versionSupplier = versionSupplier; - _lock = lock; - - _lock.lock(); - try { - long curVersion = _versionSupplier.get(); - _backing = _iteratorSupplier.apply(Pair.of(start, key)); - _next = _backing.hasNext() ? _backing.next() : null; - _curVersion = curVersion; - } finally { - _lock.unlock(); - } - } - - private void maybeRefresh() { - _lock.lock(); - CloseableKvIterator oldBacking = null; - try { - if (_versionSupplier.get() == _curVersion) { - return; - } - Log.tracev("Refreshing iterator last refreshed {0}, current version {1}, current value {2}", - _curVersion, _versionSupplier.get(), _next); - long newVersion = _versionSupplier.get(); - oldBacking = _backing; - _backing = _iteratorSupplier.apply(Pair.of(IteratorStart.GE, _next.getKey())); - var next = _backing.hasNext() ? _backing.next() : null; - if (next == null) { - Log.errorv("Failed to refresh iterator, null last refreshed {0}," + - " current version {1}, current value {2}, read value {3}", _curVersion, newVersion, _next, next); - assert false; - } else if (!next.equals(_next)) { - Log.errorv("Failed to refresh iterator, mismatch last refreshed {0}," + - " current version {1}, current value {2}, read value {3}", _curVersion, newVersion, _next, next); - assert false; - } - Log.tracev("Refreshed iterator last refreshed {0}, current version {1}, old value {2}, new value {3}", - _curVersion, newVersion, _next, next); - - _next = next; - _curVersion = newVersion; - } finally { - _lock.unlock(); - if (oldBacking != null) { - oldBacking.close(); - } - } - } - - // _next should always be valid, so it's ok to do the refresh "lazily" - private void prepareNext() { - _lock.lock(); - try { - maybeRefresh(); - if (_backing.hasNext()) { - _next = _backing.next(); - } else { - _next = null; - } - } finally { - _lock.unlock(); - } - } - - @Override - public K peekNextKey() { - if (_next == null) { - throw new NoSuchElementException(); - } - return _next.getKey(); - } - - @Override - public void skip() { - if (_next == null) { - throw new NoSuchElementException(); - } - prepareNext(); - } - - @Override - public void close() { - _backing.close(); - } - - @Override - public boolean hasNext() { - return _next != null; - } - - @Override - public Pair next() { - if (_next == null) { - throw new NoSuchElementException("No more elements"); - } - var ret = _next; - prepareNext(); - Log.tracev("Read: {0}, next: {1}", ret, _next); - return ret; - } - -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java deleted file mode 100644 index 249f1c2f..00000000 --- 
a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/StaleIteratorException.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.objects; - -public class StaleIteratorException extends RuntimeException { - public StaleIteratorException() { - super(); - } - - public StaleIteratorException(String message) { - super(message); - } -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java new file mode 100644 index 00000000..62a7ca1c --- /dev/null +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/Tombstone.java @@ -0,0 +1,10 @@ +package com.usatiuk.dhfs.objects; + +import java.util.Optional; + +public record Tombstone() implements MaybeTombstone { + @Override + public Optional opt() { + return Optional.empty(); + } +} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java index d84bdd79..e8e01e27 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TombstoneMergingKvIterator.java @@ -10,7 +10,7 @@ public class TombstoneMergingKvIterator, V> implements C private final CloseableKvIterator _backing; private final String _name; - public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List>> iterators) { + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List>> iterators) { _name = name; _backing = new PredicateKvIterator<>( new MergingKvIterator<>(name + "-merging", startType, startKey, iterators), @@ -20,24 +20,15 @@ public class TombstoneMergingKvIterator, V> implements C if (pair instanceof Tombstone) { return null; } - return ((Data) pair).value; + return ((Data) pair).value(); }); } @SafeVarargs - public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn>... iterators) { + public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn>... 
iterators) { this(name, startType, startKey, List.of(iterators)); } - public interface DataType { - } - - public record Tombstone() implements DataType { - } - - public record Data(V value) implements DataType { - } - @Override public K peekNextKey() { return _backing.peekNextKey(); @@ -48,6 +39,26 @@ public class TombstoneMergingKvIterator, V> implements C _backing.skip(); } + @Override + public K peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + return _backing.prev(); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java deleted file mode 100644 index 8068e262..00000000 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/TxBundle.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.usatiuk.dhfs.objects; - -public interface TxBundle { - long getId(); - - void commit(JDataVersionedWrapper obj); - - void delete(JObjectKey obj); -} diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java index 56c84aed..2fb14558 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/WritebackObjectPersistentStore.java @@ -25,20 +25,21 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiConsumer; import java.util.function.Consumer; @ApplicationScoped public class WritebackObjectPersistentStore { - private final LinkedList _pendingBundles = new LinkedList<>(); + private final LinkedList _pendingBundles = new LinkedList<>(); private final AtomicReference> _pendingWrites = new AtomicReference<>(TreePMap.empty()); private final ReentrantReadWriteLock _pendingWritesVersionLock = new ReentrantReadWriteLock(); - private final AtomicLong _pendingWritesVersion = new AtomicLong(); - private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); + private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); private final Object _flushWaitSynchronizer = new Object(); private final AtomicLong _lastWrittenTx = new AtomicLong(-1); private final AtomicLong _counter = new AtomicLong(); + private final AtomicLong _lastCommittedTx = new AtomicLong(-1); private final AtomicLong _waitedTotal = new AtomicLong(0); @Inject CachingObjectPersistentStore cachedStore; @@ -70,6 +71,8 @@ public class WritebackObjectPersistentStore { } catch (InterruptedException ignored) { } }); + _counter.set(cachedStore.getLastTxId()); + _lastCommittedTx.set(cachedStore.getLastTxId()); _ready = true; } @@ -94,7 +97,7 @@ public class WritebackObjectPersistentStore { private void writeback() { while (!Thread.interrupted()) { try { - TxBundleImpl bundle = new TxBundleImpl(0); + TxBundle bundle = new TxBundle(0); synchronized (_pendingBundles) { while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) _pendingBundles.wait(); @@ -116,11 +119,11 @@ public class WritebackObjectPersistentStore { for (var e : bundle._entries.values()) { switch (e) 
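With Tombstone and Data promoted to top-level types, a tombstone-merging iterator composes as below. This is a sketch: the two maps stand in for a newer pending-writes layer and an older backing layer, and it assumes, as in the writeback usage that follows, that IterProdFn is the (start, key) -> iterator factory used throughout the patch and that iterators listed first win key ties.

static void tombstoneMergeExample() {
    NavigableMap<String, MaybeTombstone<Integer>> newer = new TreeMap<>();
    newer.put("a", new Tombstone<>()); // "a" deleted in the newer layer
    newer.put("b", new Data<>(20));    // "b" overwritten
    NavigableMap<String, Integer> older = new TreeMap<>(Map.of("a", 1, "b", 2, "c", 3));
    var it = new TombstoneMergingKvIterator<String, Integer>("sketch", IteratorStart.GE, "a",
            (tS, tK) -> new NavigableMapKvIterator<>(newer, tS, tK),
            (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(older, tS, tK), Data::new));
    while (it.hasNext())
        System.out.println(it.next()); // (b, 20) then (c, 3); "a" is suppressed by its tombstone
    it.close();
}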
{ - case TxBundleImpl.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { + case TxBundle.CommittedEntry(JObjectKey key, JDataVersionedWrapper data, int size) -> { Log.trace("Writing new " + key); toWrite.add(Pair.of(key, data)); } - case TxBundleImpl.DeletedEntry(JObjectKey key) -> { + case TxBundle.DeletedEntry(JObjectKey key) -> { Log.trace("Deleting from persistent storage " + key); toDelete.add(key); } @@ -132,11 +135,13 @@ public class WritebackObjectPersistentStore { new TxManifestObj<>( Collections.unmodifiableList(toWrite), Collections.unmodifiableList(toDelete) - )); + ), bundle.getId()); Log.trace("Bundle " + bundle.getId() + " committed"); // Remove from pending writes, after real commit + // As we are the only writers to _pendingWrites, no need to synchronize with iterator creation + // if they get the older version, as it will still contain all the new changes synchronized (_pendingBundles) { var curPw = _pendingWrites.get(); for (var e : bundle._entries.values()) { @@ -219,7 +224,7 @@ public class WritebackObjectPersistentStore { } } synchronized (_notFlushedBundles) { - var bundle = new TxBundleImpl(_counter.incrementAndGet()); + var bundle = new TxBundle(_counter.incrementAndGet()); _pendingBundles.addLast(bundle); _notFlushedBundles.put(bundle.getId(), bundle); return bundle; @@ -234,26 +239,28 @@ public class WritebackObjectPersistentStore { try { synchronized (_pendingBundles) { var curPw = _pendingWrites.get(); - for (var e : ((TxBundleImpl) bundle)._entries.values()) { + for (var e : ((TxBundle) bundle)._entries.values()) { switch (e) { - case TxBundleImpl.CommittedEntry c -> { + case TxBundle.CommittedEntry c -> { curPw = curPw.plus(c.key(), new PendingWrite(c.data, bundle.getId())); } - case TxBundleImpl.DeletedEntry d -> { + case TxBundle.DeletedEntry d -> { curPw = curPw.plus(d.key(), new PendingDelete(d.key, bundle.getId())); } default -> throw new IllegalStateException("Unexpected value: " + e); } } + // Now, make the changes visible to new iterators _pendingWrites.set(curPw); - ((TxBundleImpl) bundle).setReady(); - _pendingWritesVersion.incrementAndGet(); + ((TxBundle) bundle).setReady(); if (_pendingBundles.peek() == bundle) _pendingBundles.notify(); synchronized (_flushWaitSynchronizer) { - currentSize += ((TxBundleImpl) bundle).calculateTotalSize(); + currentSize += ((TxBundle) bundle).calculateTotalSize(); } } + assert bundle.getId() > _lastCommittedTx.get(); + _lastCommittedTx.set(bundle.getId()); } finally { _pendingWritesVersionLock.writeLock().unlock(); } @@ -263,9 +270,9 @@ public class WritebackObjectPersistentStore { verifyReady(); synchronized (_pendingBundles) { Log.warn("Dropped bundle: " + bundle); - _pendingBundles.remove((TxBundleImpl) bundle); + _pendingBundles.remove((TxBundle) bundle); synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundleImpl) bundle).calculateTotalSize(); + currentSize -= ((TxBundle) bundle).calculateTotalSize(); } } } @@ -296,7 +303,7 @@ public class WritebackObjectPersistentStore { } } - private static class TxBundleImpl implements TxBundle { + private static class TxBundle { private final LinkedHashMap _entries = new LinkedHashMap<>(); private final ArrayList _callbacks = new ArrayList<>(); private long _txId; @@ -304,7 +311,7 @@ public class WritebackObjectPersistentStore { private long _size = -1; private boolean _wasCommitted = false; - private TxBundleImpl(long txId) { + private TxBundle(long txId) { _txId = txId; } @@ -348,7 +355,7 @@ public class WritebackObjectPersistentStore { 
return _size; } - public void compress(TxBundleImpl other) { + public void compress(TxBundle other) { if (_txId >= other._txId) throw new IllegalArgumentException("Compressing an older bundle into newer"); @@ -412,14 +419,20 @@ return new VerboseReadResultPersisted(cachedStore.readObject(key)); } - public Consumer commitTx(Collection> writes, long id) { + /** + * @param commitLocked - a function that will be called with the new transaction id and a Runnable + * that commits the transaction; the changes in the store become visible to new transactions + * only after that Runnable is called + */ + public Consumer commitTx(Collection> writes, BiConsumer commitLocked) { var bundle = createBundle(); + long bundleId = bundle.getId(); try { for (var action : writes) { switch (action) { case TxRecord.TxObjectRecordWrite write -> { Log.trace("Flushing object " + write.key()); - bundle.commit(new JDataVersionedWrapper(write.data(), id)); + bundle.commit(new JDataVersionedWrapper(write.data(), bundleId)); } case TxRecord.TxObjectRecordDeleted deleted -> { Log.trace("Deleting object " + deleted.key()); @@ -435,10 +448,11 @@ throw new TxCommitException(t.getMessage(), t); } - Log.tracef("Committing transaction %d to storage", id); - commitBundle(bundle); - long bundleId = bundle.getId(); + Log.tracef("Committing transaction %d to storage", bundleId); + commitLocked.accept(bundleId, () -> { + commitBundle(bundle); + }); return r -> asyncFence(bundleId, r); } @@ -451,29 +465,26 @@ _pendingWritesVersionLock.readLock().lock(); try { var curPending = _pendingWrites.get(); - - return new InvalidatableKvIterator<>( - new InconsistentKvIteratorWrapper<>( - p -> - new TombstoneMergingKvIterator<>("writeback-ps", p.getLeft(), p.getRight(), - (tS, tK) -> new MappingKvIterator<>( - new NavigableMapKvIterator<>(curPending, tS, tK), - e -> switch (e) { - case PendingWrite pw -> - new TombstoneMergingKvIterator.Data<>(pw.data()); - case PendingDelete d -> - new TombstoneMergingKvIterator.Tombstone<>(); - default -> - throw new IllegalStateException("Unexpected value: " + e); - }), - (tS, tK) -> new MappingKvIterator<>(cachedStore.getIterator(tS, tK), TombstoneMergingKvIterator.Data::new)), start, key), - _pendingWritesVersion::get, _pendingWritesVersionLock.readLock()); + return new TombstoneMergingKvIterator<>("writeback-ps", start, key, + (tS, tK) -> new MappingKvIterator<>( + new NavigableMapKvIterator<>(curPending, tS, tK), + e -> switch (e) { + case PendingWrite pw -> new Data<>(pw.data()); + case PendingDelete d -> new Tombstone<>(); + default -> throw new IllegalStateException("Unexpected value: " + e); + }), + (tS, tK) -> cachedStore.getIterator(tS, tK)); } finally { _pendingWritesVersionLock.readLock().unlock(); } } - public CloseableKvIterator getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); + public long getLastTxId() { + _pendingWritesVersionLock.readLock().lock(); + try { + return _lastCommittedTx.get(); + } finally { + _pendingWritesVersionLock.readLock().unlock(); + } } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java index 1d0c1b98..c3bd22dd 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java +++ 
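This signature change is the heart of the new commit path: instead of committing on its own, the writeback store hands the caller the allocated bundle id together with a Runnable that performs the actual commit, so the caller can make its own bookkeeping visible atomically with the store's. A hypothetical caller, for illustration only (snapshotLock and currentSnapshotId are invented names, not SnapshotManager's actual fields):

writebackStore.commitTx(writes, (txId, commitStore) -> {
    snapshotLock.writeLock().lock();
    try {
        commitStore.run();        // the store's changes become visible here...
        currentSnapshotId = txId; // ...atomically with our own snapshot id bump
    } finally {
        snapshotLock.writeLock().unlock();
    }
});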
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/CachingObjectPersistentStore.java @@ -8,29 +8,24 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.TreePMap; import javax.annotation.Nonnull; -import java.util.Collection; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Optional; -import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Stream; @ApplicationScoped public class CachingObjectPersistentStore { private final LinkedHashMap _cache = new LinkedHashMap<>(8, 0.75f, true); - private final ConcurrentSkipListMap _sortedCache = new ConcurrentSkipListMap<>(); + private TreePMap _sortedCache = TreePMap.empty(); + private long _cacheVersion = 0; - private final AtomicLong _cacheVersion = new AtomicLong(0); - private final ReentrantReadWriteLock _cacheVersionLock = new ReentrantReadWriteLock(); - - private final HashSet _pendingWrites = new HashSet<>(); + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); private final DataLocker _locker = new DataLocker(); + @Inject SerializingObjectPersistentStore delegate; @ConfigProperty(name = "dhfs.objects.lru.limit") @@ -61,90 +56,78 @@ public class CachingObjectPersistentStore { } } - @Nonnull - public Collection findAllObjects() { - return delegate.findAllObjects(); - } - private void put(JObjectKey key, Optional obj) { // Log.tracev("Adding {0} to cache: {1}", key, obj); - synchronized (_cache) { - assert !_pendingWrites.contains(key); - int size = obj.map(o -> o.data().estimateSize()).orElse(0); + _lock.writeLock().lock(); + try { + int size = obj.map(o -> o.data().estimateSize()).orElse(16); _curSize += size; - var entry = new CacheEntry(obj, size); + var entry = new CacheEntry(obj.>map(Data::new).orElse(new Tombstone<>()), size); var old = _cache.putLast(key, entry); - _sortedCache.put(key, entry); + + _sortedCache = _sortedCache.plus(key, entry); if (old != null) _curSize -= old.size(); while (_curSize >= sizeLimit) { var del = _cache.pollFirstEntry(); - _sortedCache.remove(del.getKey(), del.getValue()); + _sortedCache = _sortedCache.minus(del.getKey()); _curSize -= del.getValue().size(); _evict++; } + } finally { + _lock.writeLock().unlock(); } } @Nonnull public Optional readObject(JObjectKey name) { try (var lock = _locker.lock(name)) { - synchronized (_cache) { + _lock.readLock().lock(); + try { var got = _cache.get(name); if (got != null) { - return got.object(); + return got.object().opt(); } + } finally { + _lock.readLock().unlock(); } - var got = delegate.readObject(name); - put(name, got); - return got; + // TODO: This is possibly racy +// var got = delegate.readObject(name); +// put(name, got); + return delegate.readObject(name); } } - public void commitTx(TxManifestObj names) { + public void commitTx(TxManifestObj names, long txId) { var serialized = delegate.prepareManifest(names); - _cacheVersionLock.writeLock().lock(); - try { - // During commit, readObject shouldn't be called for these items, - // it should be handled by the upstream store - synchronized (_cache) { + Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); + 
delegate.commitTx(serialized, txId, (commit) -> { + _lock.writeLock().lock(); + try { + // Make the changes visible atomically both in cache and in the underlying store for (var write : names.written()) { put(write.getLeft(), Optional.of(write.getRight())); - var added = _pendingWrites.add(write.getLeft()); - assert added; } for (var del : names.deleted()) { - // TODO: tombstone cache? - _curSize -= Optional.ofNullable(_cache.get(del)).map(CacheEntry::size).orElse(0L); - _cache.remove(del); - _sortedCache.remove(del); - var added = _pendingWrites.add(del); - assert added; + put(del, Optional.empty()); } + ++_cacheVersion; + commit.run(); + } finally { + _lock.writeLock().unlock(); } - Log.tracev("Committing: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); - delegate.commitTx(serialized); - // Now, reading from the backing store should return the new data - synchronized (_cache) { - for (var key : Stream.concat(names.written().stream().map(Pair::getLeft), - names.deleted().stream()).toList()) { - var removed = _pendingWrites.remove(key); - assert removed; - } - } - _cacheVersion.incrementAndGet(); - Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); - } finally { - _cacheVersionLock.writeLock().unlock(); - } + }); + Log.tracev("Committed: {0} writes, {1} deletes", names.written().size(), names.deleted().size()); } private class CachingKvIterator implements CloseableKvIterator { private final CloseableKvIterator _delegate; + // This should be created under lock + private final long _curCacheVersion = _cacheVersion; private CachingKvIterator(CloseableKvIterator delegate) { _delegate = delegate; @@ -175,11 +158,24 @@ public class CachingObjectPersistentStore { return _delegate.peekPrevKey(); } + private void maybeCache(Pair prev) { + _lock.writeLock().lock(); + try { + if (_cacheVersion != _curCacheVersion) { + Log.tracev("Not caching: {0}", prev); + } else { + Log.tracev("Caching: {0}", prev); + put(prev.getKey(), Optional.of(prev.getValue())); + } + } finally { + _lock.writeLock().unlock(); + } + } + @Override public Pair prev() { var prev = _delegate.prev(); - Log.tracev("Caching: {0}", prev); - put(prev.getKey(), Optional.of(prev.getValue())); + maybeCache(prev); return prev; } @@ -196,8 +192,7 @@ public class CachingObjectPersistentStore { @Override public Pair next() { var next = _delegate.next(); - Log.tracev("Caching: {0}", next); - put(next.getKey(), Optional.of(next.getValue())); + maybeCache(next); return next; } } @@ -206,30 +201,31 @@ public class CachingObjectPersistentStore { // Does not have to guarantee consistent view, snapshots are handled by upper layers // Warning: it has a nasty side effect of global caching, so in this case don't even call next on it, // if some objects are still in writeback - public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - Log.tracev("Getting cache iterator: {0}, {1}", start, key); - _cacheVersionLock.readLock().lock(); + public CloseableKvIterator> getIterator(IteratorStart start, JObjectKey key) { + _lock.readLock().lock(); try { - return new InconsistentSelfRefreshingKvIterator<>( - p -> new MergingKvIterator<>("cache", p.getLeft(), p.getRight(), - (mS, mK) -> new PredicateKvIterator<>( - new NavigableMapKvIterator<>(_sortedCache, mS, mK), - mS, mK, - e -> { - Log.tracev("Taken from cache: {0}", e); - return e.object().orElse(null); - } - ), (mS, mK) -> new CachingKvIterator(delegate.getIterator(mS, mK))), _cacheVersion::get, - 
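maybeCache is the subtle part of the new caching iterator: an iterator may still be serving data read before a later commit, and blindly writing those reads back into the cache would resurrect stale values. The guard, reduced to its essence (a sketch with hypothetical names):

class VersionGuardedCache<K, V> {
    private final java.util.HashMap<K, V> cache = new java.util.HashMap<>();
    private long version = 0; // bumped on every commit

    synchronized long openIteratorVersion() { return version; } // remembered at iterator creation
    synchronized void commit(java.util.Map<K, V> writes) {
        cache.putAll(writes);
        version++;
    }
    synchronized void maybeCache(long iteratorVersion, K key, V value) {
        if (version == iteratorVersion)
            cache.put(key, value); // the iterator's data is still current
        // otherwise drop it: a commit landed after the iterator was opened,
        // so whatever it read may already be out of date
    }
}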
_cacheVersionLock.readLock(), start, key); + Log.tracev("Getting cache iterator: {0}, {1}", start, key); + var curSortedCache = _sortedCache; + return new MergingKvIterator<>("cache", start, key, + (mS, mK) + -> new MappingKvIterator<>( + new NavigableMapKvIterator<>(curSortedCache, mS, mK), + e -> { + Log.tracev("Taken from cache: {0}", e); + return e.object(); + } + ), + (mS, mK) + -> new MappingKvIterator<>(new CachingKvIterator(delegate.getIterator(mS, mK)), Data::new)); } finally { - _cacheVersionLock.readLock().unlock(); + _lock.readLock().unlock(); } } - public CloseableKvIterator getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); + private record CacheEntry(MaybeTombstone object, long size) { } - private record CacheEntry(Optional object, long size) { + public long getLastTxId() { + return delegate.getLastCommitId(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java index a38f964c..080b51ab 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/LmdbObjectPersistentStore.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects.persistence; import com.google.protobuf.ByteString; import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JObjectKey; +import com.usatiuk.dhfs.objects.KeyPredicateKvIterator; import com.usatiuk.dhfs.objects.ReversibleKvIterator; import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; import io.quarkus.arc.properties.IfBuildProperty; @@ -21,11 +22,11 @@ import javax.annotation.Nonnull; import java.io.IOException; import java.lang.ref.Cleaner; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; -import java.util.Collection; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Optional; +import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; import static org.lmdbjava.DbiFlags.MDB_CREATE; import static org.lmdbjava.Env.create; @@ -38,7 +39,12 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { private Dbi _db; private boolean _ready = false; + private long _lastTxId = 0; + + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); + private static final String DB_NAME = "objects"; + private static final byte[] DB_VER_OBJ_NAME = "__DB_VER_OBJ".getBytes(StandardCharsets.UTF_8); public LmdbObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { _root = Path.of(root).resolve("objects"); @@ -54,6 +60,20 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { .setMaxDbs(1) .open(_root.toFile(), EnvFlags.MDB_NOTLS); _db = _env.openDbi(DB_NAME, MDB_CREATE); + + var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length); + bb.put(DB_VER_OBJ_NAME); + bb.flip(); + + try (Txn txn = _env.txnRead()) { + var value = _db.get(txn, bb); + if (value != null) { + var ver = value.getLong(); + Log.infov("Read version: {0}", ver); + _lastTxId = ver; + } + } + _ready = true; } @@ -100,13 +120,16 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { private static final Cleaner CLEANER = Cleaner.create(); private final MutableObject _closed = new 
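A second change worth noting in this file: deletions are now cached as tombstones (with a nominal size of 16) instead of being evicted, so a read of a freshly deleted key is answered from the cache as Optional.empty() rather than by hitting the backing store. In sketch form, with cacheGet and delegateRead as hypothetical helpers:

Optional<JDataVersionedWrapper> cachedRead(JObjectKey key) {
    CacheEntry entry = cacheGet(key);    // hypothetical cache lookup
    if (entry != null)
        return entry.object().opt();     // Data -> Optional.of(...), Tombstone -> Optional.empty()
    return delegateRead(key);            // true miss: fall through to the store
}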
MutableObject<>(false); + private final Exception _allocationStacktrace = new Exception(); LmdbKvIterator(IteratorStart start, JObjectKey key) { _goingForward = true; var closedRef = _closed; + var bt = _allocationStacktrace; CLEANER.register(this, () -> { if (!closedRef.getValue()) { - Log.error("Iterator was not closed before GC"); + Log.error("Iterator was not closed before GC, allocated at: {0}", bt); + System.exit(-1); } }); @@ -238,11 +261,11 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { @Override public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - return new LmdbKvIterator(start, key); + return new KeyPredicateKvIterator<>(new LmdbKvIterator(start, key), start, key, (k) -> !Arrays.equals(k.name().getBytes(StandardCharsets.UTF_8), DB_VER_OBJ_NAME)); } @Override - public void commitTx(TxManifestRaw names) { + public void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { verifyReady(); try (Txn txn = _env.txnWrite()) { for (var written : names.written()) { @@ -255,7 +278,31 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { for (JObjectKey key : names.deleted()) { _db.delete(txn, key.toByteBuffer()); } - txn.commit(); + + var bb = ByteBuffer.allocateDirect(DB_VER_OBJ_NAME.length); + bb.put(DB_VER_OBJ_NAME); + bb.flip(); + var bbData = ByteBuffer.allocateDirect(8); + + commitLocked.accept(() -> { + _lock.writeLock().lock(); + try { + var realTxId = txId; + if (realTxId == -1) + realTxId = _lastTxId + 1; + + assert realTxId > _lastTxId; + _lastTxId = realTxId; + + bbData.putLong(realTxId); + bbData.flip(); + _db.put(txn, bb, bbData); + + txn.commit(); + } finally { + _lock.writeLock().unlock(); + } + }); } } @@ -277,4 +324,14 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore { return _root.toFile().getUsableSpace(); } + @Override + public long getLastCommitId() { + _lock.readLock().lock(); + try { + return _lastTxId; + } finally { + _lock.readLock().unlock(); + } + } + } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java index 7bba672a..0cf640bf 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/MemoryObjectPersistentStore.java @@ -11,11 +11,15 @@ import javax.annotation.Nonnull; import java.util.Collection; import java.util.Optional; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; @ApplicationScoped @IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory") public class MemoryObjectPersistentStore implements ObjectPersistentStore { private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); + private long _lastCommitId = 0; + private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock(); @Nonnull @Override @@ -39,7 +43,7 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore { } @Override - public void commitTx(TxManifestRaw names) { + public void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { synchronized (this) { for (var written : names.written()) { _objects.put(written.getKey(), written.getValue()); @@ -47,6 +51,15 @@ public class MemoryObjectPersistentStore 
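The Cleaner registration above is a general leak-detection idiom: the cleanup action must capture only the closed flag and the allocation-site exception, never the iterator itself, or the registration would keep the object reachable forever. Extracted into a standalone sketch:

import java.lang.ref.Cleaner;
import org.apache.commons.lang3.mutable.MutableObject;

class TrackedResource implements AutoCloseable {
    private static final Cleaner CLEANER = Cleaner.create();
    private final MutableObject<Boolean> _closed = new MutableObject<>(false);

    TrackedResource() {
        var closedRef = _closed;                           // deliberately no reference to "this"
        var allocatedAt = new Exception("allocated here"); // cheap breadcrumb for the log
        CLEANER.register(this, () -> {
            if (!closedRef.getValue())
                allocatedAt.printStackTrace(); // leaked: GC'd without close()
        });
    }

    @Override
    public void close() {
        _closed.setValue(true);
    }
}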
implements ObjectPersistentStore { for (JObjectKey key : names.deleted()) { _objects.remove(key); } + commitLocked.accept(() -> { + _lock.writeLock().lock(); + try { + assert txId > _lastCommitId; + _lastCommitId = txId; + } finally { + _lock.writeLock().unlock(); + } + }); } } @@ -64,4 +77,14 @@ public long getUsableSpace() { return 0; } + + @Override + public long getLastCommitId() { + _lock.readLock().lock(); + try { + return _lastCommitId; + } finally { + _lock.readLock().unlock(); + } + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java index 3467007b..bcb08401 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/ObjectPersistentStore.java @@ -7,6 +7,7 @@ import com.usatiuk.dhfs.objects.JObjectKey; import javax.annotation.Nonnull; import java.util.Collection; import java.util.Optional; +import java.util.function.Consumer; // Persistent storage of objects // All changes are written as sequential transactions @@ -21,15 +22,17 @@ public interface ObjectPersistentStore { // Does not have to guarantee consistent view, snapshots are handled by upper layers CloseableKvIterator getIterator(IteratorStart start, JObjectKey key); - default CloseableKvIterator getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); - } - - void commitTx(TxManifestRaw names); + /** + * @param commitLocked - a function that will be called with a Runnable that will commit the transaction; + * the changes in the store will be visible to new transactions only after the runnable is called + */ + void commitTx(TxManifestRaw names, long txId, Consumer commitLocked); long getTotalSpace(); long getFreeSpace(); long getUsableSpace(); + + long getLastCommitId(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java index a38604db..f439731e 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/persistence/SerializingObjectPersistentStore.java @@ -8,6 +8,7 @@ import org.apache.commons.lang3.tuple.Pair; import javax.annotation.Nonnull; import java.util.Collection; import java.util.Optional; +import java.util.function.Consumer; @ApplicationScoped public class SerializingObjectPersistentStore { @@ -41,11 +42,15 @@ , names.deleted()); } - void commitTx(TxManifestObj names) { - delegateStore.commitTx(prepareManifest(names)); +// void commitTx(TxManifestObj names, Consumer commitLocked) { +// delegateStore.commitTx(prepareManifest(names), commitLocked); +// } + + void commitTx(TxManifestRaw names, long txId, Consumer commitLocked) { + delegateStore.commitTx(names, txId, commitLocked); } - void commitTx(TxManifestRaw names) { - delegateStore.commitTx(names); + long getLastCommitId() { + return delegateStore.getLastCommitId(); } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java 
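getLastCommitId is what ties the layers together at startup: the persistent store durably remembers the id of the last committed transaction (the LMDB implementation keeps it under its reserved __DB_VER_OBJ key), and the writeback layer seeds its counter from it so transaction ids stay monotonic across restarts. Roughly, for any ObjectPersistentStore named store:

import java.util.concurrent.atomic.AtomicLong;

long last = store.getLastCommitId();       // e.g. 0 on a fresh database
AtomicLong counter = new AtomicLong(last);
long nextTxId = counter.incrementAndGet(); // strictly greater than anything already durable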
b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java index 1cdefc96..e783a2cf 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntry.java @@ -2,4 +2,6 @@ package com.usatiuk.dhfs.objects.snapshot; public interface SnapshotEntry { long whenToRemove(); + + SnapshotEntry withWhenToRemove(long whenToRemove); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java index 3b0dbd6f..71113d45 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryDeleted.java @@ -1,4 +1,8 @@ package com.usatiuk.dhfs.objects.snapshot; public record SnapshotEntryDeleted(long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntryDeleted withWhenToRemove(long whenToRemove) { + return new SnapshotEntryDeleted(whenToRemove); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java index 78036e17..98cfbefc 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotEntryObject.java @@ -3,4 +3,8 @@ package com.usatiuk.dhfs.objects.snapshot; import com.usatiuk.dhfs.objects.JDataVersionedWrapper; public record SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) implements SnapshotEntry { + @Override + public SnapshotEntryObject withWhenToRemove(long whenToRemove) { + return new SnapshotEntryObject(data, whenToRemove); + } } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java index 2b52fae4..1d045665 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotKvIterator.java @@ -7,42 +7,98 @@ import org.apache.commons.lang3.tuple.Pair; import java.util.NavigableMap; import java.util.NoSuchElementException; +import java.util.Optional; -public class SnapshotKvIterator implements CloseableKvIterator> { +// TODO: test me +public class SnapshotKvIterator extends ReversibleKvIterator> { private final NavigableMap _objects; private final long _version; private final CloseableKvIterator _backing; - private Pair> _next = null; + private Pair> _next = null; public SnapshotKvIterator(NavigableMap objects, long version, IteratorStart start, JObjectKey startKey) { _objects = objects; _version = version; - _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, 0L)); - fillNext(); - if (_next == null) { - return; + _goingForward = true; + _backing = new NavigableMapKvIterator<>(_objects, start, new SnapshotKey(startKey, Long.MIN_VALUE)); + fill(); + + boolean shouldGoBack = false; + if (start == IteratorStart.LE) { + if (_next == null || _next.getKey().compareTo(startKey) > 0) { + shouldGoBack = true; + } + } else if (start == IteratorStart.LT) { + if (_next == null || 
_next.getKey().compareTo(startKey) >= 0) { + shouldGoBack = true; + } } + + if (shouldGoBack && _backing.hasPrev()) { + _goingForward = false; + _backing.skipPrev(); + fill(); + _goingForward = true; + _backing.skip(); + fill(); + } + + switch (start) { case LT -> { - assert _next.getKey().compareTo(startKey) < 0; +// assert _next == null || _next.getKey().compareTo(startKey) < 0; } case LE -> { - assert _next.getKey().compareTo(startKey) <= 0; +// assert _next == null || _next.getKey().compareTo(startKey) <= 0; } case GT -> { - assert _next.getKey().compareTo(startKey) > 0; + assert _next == null || _next.getKey().compareTo(startKey) > 0; } case GE -> { - assert _next.getKey().compareTo(startKey) >= 0; + assert _next == null || _next.getKey().compareTo(startKey) >= 0; + } + } + + } + + private void fillPrev(JObjectKey ltKey) { + if (ltKey != null) + while (_backing.hasPrev() && _backing.peekPrevKey().key().equals(ltKey)) { + Log.tracev("Snapshot skipping prev: {0}", _backing.peekPrevKey()); + _backing.skipPrev(); + } + + _next = null; + + while (_backing.hasPrev() && _next == null) { + var prev = _backing.prev(); + if (prev.getKey().version() <= _version && prev.getValue().whenToRemove() > _version) { + Log.tracev("Snapshot taking prev: {0}", prev); + _next = switch (prev.getValue()) { + case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> + Pair.of(prev.getKey().key(), new Data<>(data)); + case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(prev.getKey().key(), new Tombstone<>()); + default -> throw new IllegalStateException("Unexpected value: " + prev.getValue()); + }; + } + } + + if (_next != null) { + if (_next.getValue() instanceof Data( + JDataVersionedWrapper value + )) { + assert value.version() <= _version; + } + } } private void fillNext() { + _next = null; while (_backing.hasNext() && _next == null) { var next = _backing.next(); var nextNextKey = _backing.hasNext() ? _backing.peekNextKey() : null; while (nextNextKey != null && nextNextKey.key().equals(next.getKey().key()) && nextNextKey.version() <= _version) { + Log.tracev("Snapshot skipping next: {0} (too old)", next); next = _backing.next(); nextNextKey = _backing.hasNext() ?
_backing.peekNextKey() : null; } @@ -50,14 +106,13 @@ public class SnapshotKvIterator implements CloseableKvIterator _version) { _next = switch (next.getValue()) { case SnapshotEntryObject(JDataVersionedWrapper data, long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Data<>(data)); - case SnapshotEntryDeleted(long whenToRemove) -> - Pair.of(next.getKey().key(), new TombstoneMergingKvIterator.Tombstone<>()); + Pair.of(next.getKey().key(), new Data<>(data)); + case SnapshotEntryDeleted(long whenToRemove) -> Pair.of(next.getKey().key(), new Tombstone<>()); default -> throw new IllegalStateException("Unexpected value: " + next.getValue()); }; } if (_next != null) { - if (_next.getValue() instanceof TombstoneMergingKvIterator.Data( + if (_next.getValue() instanceof Data( JDataVersionedWrapper value )) { assert value.version() <= _version; @@ -66,19 +121,39 @@ public class SnapshotKvIterator implements CloseableKvIterator> next() { + public Pair> nextImpl() { if (_next == null) throw new NoSuchElementException("No more elements"); var ret = _next; - if (ret.getValue() instanceof TombstoneMergingKvIterator.Data( + if (ret.getValue() instanceof Data( JDataVersionedWrapper value )) { assert value.version() <= _version; } - _next = null; - fillNext(); + fill(); Log.tracev("Read: {0}, next: {1}", ret, _next); return ret; } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java index bb7be190..77b36f46 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/snapshot/SnapshotManager.java @@ -10,15 +10,13 @@ import jakarta.inject.Inject; import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.pcollections.TreePMap; import javax.annotation.Nonnull; import java.lang.ref.Cleaner; import java.util.*; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; -import java.util.function.Function; @ApplicationScoped public class SnapshotManager { @@ -32,10 +30,9 @@ public class SnapshotManager { private long _lastSnapshotId = 0; private long _lastAliveSnapshotId = -1; - private final AtomicLong _snapshotVersion = new AtomicLong(0); private final Queue _snapshotIds = new ArrayDeque<>(); - private final ConcurrentSkipListMap _objects = new ConcurrentSkipListMap<>(); + private TreePMap _objects = TreePMap.empty(); private final TreeMap> _snapshotBounds = new TreeMap<>(); private final HashMap _snapshotRefCounts = new HashMap<>(); @@ -44,67 +41,78 @@ public class SnapshotManager { assert _snapshotIds.isEmpty() || _snapshotIds.peek() == _lastAliveSnapshotId; } - public Consumer commitTx(Collection> writes, long id) { + // This should not be called for the same objects concurrently + public Consumer commitTx(Collection> writes) { +// _lock.writeLock().lock(); +// try { +// if (!_snapshotIds.isEmpty()) { +// verify(); + HashMap newEntries = new HashMap<>(); + for (var action : writes) { + var current = writebackStore.readObjectVerbose(action.key()); + // Add to snapshot the previous visible version of the replaced object + // I.e. 
should be visible to all transactions with id <= id + // and at least as its corresponding version + Pair newSnapshotEntry = switch (current) { + case WritebackObjectPersistentStore.VerboseReadResultPersisted( + Optional data + ) -> Pair.of(new SnapshotKey(action.key(), data.map(JDataVersionedWrapper::version).orElse(-1L)), + data.map(o -> new SnapshotEntryObject(o, -1)).orElse(new SnapshotEntryDeleted(-1))); + case WritebackObjectPersistentStore.VerboseReadResultPending( + PendingWriteEntry pending + ) -> { + yield switch (pending) { + case PendingWrite write -> + Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), -1)); + case PendingDelete delete -> + Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(-1)); + default -> throw new IllegalStateException("Unexpected value: " + pending); + }; + } + default -> throw new IllegalStateException("Unexpected value: " + current); + }; + + + Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); + + newEntries.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); + } + _lock.writeLock().lock(); try { - assert id > _lastSnapshotId; - if (!_snapshotIds.isEmpty()) { - verify(); - for (var action : writes) { - var current = writebackStore.readObjectVerbose(action.key()); - // Add to snapshot the previous visible version of the replaced object - // I.e. should be visible to all transactions with id <= id - // and at least as its corresponding version - Pair newSnapshotEntry = switch (current) { - case WritebackObjectPersistentStore.VerboseReadResultPersisted( - Optional data - ) -> - Pair.of(new SnapshotKey(action.key(), Math.max(_snapshotIds.peek(), data.map(JDataVersionedWrapper::version).orElse(0L))), - data.map(o -> new SnapshotEntryObject(o, id)).orElse(new SnapshotEntryDeleted(id))); - case WritebackObjectPersistentStore.VerboseReadResultPending( - PendingWriteEntry pending - ) -> { - assert pending.bundleId() < id; - yield switch (pending) { - case PendingWrite write -> - Pair.of(new SnapshotKey(action.key(), write.bundleId()), new SnapshotEntryObject(write.data(), id)); - case PendingDelete delete -> - Pair.of(new SnapshotKey(action.key(), delete.bundleId()), new SnapshotEntryDeleted(id)); - default -> throw new IllegalStateException("Unexpected value: " + pending); - }; + return writebackStore.commitTx(writes, (id, commit) -> { + if (!_snapshotIds.isEmpty()) { + assert id > _lastSnapshotId; + for (var newSnapshotEntry : newEntries.entrySet()) { + assert newSnapshotEntry.getKey().version() < id; + var realNewSnapshotEntry = newSnapshotEntry.getValue().withWhenToRemove(id); + if (realNewSnapshotEntry instanceof SnapshotEntryObject re) { + assert re.data().version() <= newSnapshotEntry.getKey().version(); } - default -> throw new IllegalStateException("Unexpected value: " + current); - }; - - if (newSnapshotEntry.getValue() instanceof SnapshotEntryObject re) { - assert re.data().version() <= newSnapshotEntry.getKey().version(); - } - if (newSnapshotEntry.getValue() instanceof SnapshotEntryObject re) { - assert re.data().version() <= newSnapshotEntry.getKey().version(); - } - - Log.tracev("Adding snapshot entry {0}", newSnapshotEntry); - - var val = _objects.put(newSnapshotEntry.getLeft(), newSnapshotEntry.getRight()); + _objects = _objects.plus(newSnapshotEntry.getKey(), realNewSnapshotEntry); // assert val == null; - _snapshotBounds.merge(newSnapshotEntry.getLeft().version(), new ArrayDeque<>(List.of(newSnapshotEntry.getLeft())), - (a, b) -> { - a.addAll(b); - 
return a; - }); + _snapshotBounds.merge(newSnapshotEntry.getKey().version(), new ArrayDeque<>(List.of(newSnapshotEntry.getKey())), + (a, b) -> { + a.addAll(b); + return a; + }); + } } - - _snapshotVersion.incrementAndGet(); - } - - verify(); - // Commit under lock, iterators will see new version after the lock is released and writeback - // cache is updated - // TODO: Maybe writeback iterator being invalidated wouldn't be a problem? - return writebackStore.commitTx(writes, id); + commit.run(); + }); } finally { _lock.writeLock().unlock(); } + +// } + +// verify(); + // Commit under lock, iterators will see new version after the lock is released and writeback + // cache is updated + // TODO: Maybe writeback iterator being invalidated wouldn't be a problem? +// } finally { +// _lock.writeLock().unlock(); +// } } private void unrefSnapshot(long id) { @@ -144,11 +152,11 @@ public class SnapshotManager { Log.tracev("Could not find place to place entry {0}, curId={1}, nextId={2}, whenToRemove={3}, snapshotIds={4}", entry, finalCurId, finalNextId, entry.whenToRemove(), _snapshotIds); } else if (finalNextId < entry.whenToRemove()) { - _objects.put(new SnapshotKey(key.key(), finalNextId), entry); + _objects = _objects.plus(new SnapshotKey(key.key(), finalNextId), entry); assert finalNextId > finalCurId; toReAdd.add(Pair.of(finalNextId, new SnapshotKey(key.key(), finalNextId))); } - _objects.remove(key); + _objects = _objects.minus(key); }); toReAdd.forEach(p -> { @@ -232,92 +240,72 @@ public class SnapshotManager { @Override public JObjectKey peekNextKey() { - try { - return _backing.peekNextKey(); - } catch (StaleIteratorException e) { - assert false; - throw e; - } + return _backing.peekNextKey(); } @Override public void skip() { - try { - _backing.skip(); - } catch (StaleIteratorException e) { - assert false; - throw e; - } + _backing.skip(); + } + + @Override + public JObjectKey peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var ret = _backing.prev(); + assert ret.getValue().version() <= _id; + return ret; + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); } @Override public void close() { - try { - _backing.close(); - } catch (StaleIteratorException e) { - assert false; - throw e; - } + _backing.close(); } @Override public boolean hasNext() { - try { - return _backing.hasNext(); - } catch (StaleIteratorException e) { - assert false; - throw e; - } + return _backing.hasNext(); } @Override public Pair next() { - try { - var ret = _backing.next(); - assert ret.getValue().version() <= _id; - return ret; - } catch (StaleIteratorException e) { - assert false; - throw e; - } + var ret = _backing.next(); + assert ret.getValue().version() <= _id; + return ret; } } public CloseableKvIterator getIterator(IteratorStart start, JObjectKey key) { - // In case something was added to the snapshot, it is not guaranteed that the iterators will see it, - // so refresh them manually. Otherwise, it could be possible that something from the writeback cache will - // be served instead. Note that refreshing the iterator will also refresh the writeback iterator, - // so it also should be consistent. 
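// Editor's note (annotation, not part of the original patch): the snapshot map above keys
// every entry by SnapshotKey(key, version) and bounds its lifetime with whenToRemove, which
// commitTx now stamps with the committing transaction's id. The visibility rule that both
// commitTx and SnapshotKvIterator rely on can be written as a standalone predicate; a
// minimal sketch, using only accessors that appear elsewhere in this patch:
//
//     static boolean visibleTo(SnapshotKey key, SnapshotEntry entry, long snapshotId) {
//         // written at or before the snapshot was taken...
//         return key.version() <= snapshotId
//                 // ...and only superseded or deleted by a strictly later commit
//                 && entry.whenToRemove() > snapshotId;
//     }
//
// Read this way, stamping whenToRemove = id on the previous version makes the old value
// visible exactly to snapshots with ids in [version, id).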
- Log.tracev("Getting snapshot {0} iterator for {1} {2}", _id, start, key); _lock.readLock().lock(); try { - Function, CloseableKvIterator> iteratorFactory = - p -> new TombstoneMergingKvIterator<>("snapshot", p.getKey(), p.getValue(), - (tS, tK) -> new SnapshotKvIterator(_objects, _id, tS, tK), - (tS, tK) -> new MappingKvIterator<>( - writebackStore.getIterator(tS, tK), - d -> d.version() <= _id ? new TombstoneMergingKvIterator.Data<>(d) : new TombstoneMergingKvIterator.Tombstone<>()) - ); - - var backing = extraChecks ? new SelfRefreshingKvIterator<>( - iteratorFactory, _snapshotVersion::get, _lock.readLock(), start, key - ) : new InconsistentSelfRefreshingKvIterator<>( - iteratorFactory, _snapshotVersion::get, _lock.readLock(), start, key - ); - - return new CheckingSnapshotKvIterator(backing); + Log.tracev("Getting snapshot {0} iterator for {1} {2}\n" + + "objects in snapshots: {3}", _id, start, key, _objects); + return new CheckingSnapshotKvIterator(new TombstoneMergingKvIterator<>("snapshot", start, key, + (tS, tK) -> new SnapshotKvIterator(_objects, _id, tS, tK), + (tS, tK) -> new MappingKvIterator<>( + writebackStore.getIterator(tS, tK), d -> d.version() <= _id ? new Data<>(d) : new Tombstone<>()) + )); } finally { _lock.readLock().unlock(); } } - public CloseableKvIterator getIterator(JObjectKey key) { - return getIterator(IteratorStart.GE, key); - } - @Nonnull public Optional readObject(JObjectKey name) { - try (var it = getIterator(name)) { + try (var it = getIterator(IteratorStart.GE, name)) { if (it.hasNext()) { if (!it.peekNextKey().equals(name)) { return Optional.empty(); @@ -338,8 +326,13 @@ public class SnapshotManager { } } - public Snapshot createSnapshot(long id) { - return new Snapshot(id); + public Snapshot createSnapshot() { + _lock.writeLock().lock(); + try { + return new Snapshot(writebackStore.getLastTxId()); + } finally { + _lock.writeLock().unlock(); + } } @Nonnull diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java index eae1d216..e609081b 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/ReadTrackingObjectSourceFactory.java @@ -90,6 +90,28 @@ public class ReadTrackingObjectSourceFactory { _backing.skip(); } + @Override + public JObjectKey peekPrevKey() { + return _backing.peekPrevKey(); + } + + @Override + public Pair prev() { + var got = _backing.prev(); + _readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(got.getValue()))); + return Pair.of(got.getKey(), got.getValue().data()); + } + + @Override + public boolean hasPrev() { + return _backing.hasPrev(); + } + + @Override + public void skipPrev() { + _backing.skipPrev(); + } + @Override public void close() { _backing.close(); diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java index c4007d69..634daa22 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactory.java @@ -1,5 +1,5 @@ package com.usatiuk.dhfs.objects.transaction; public interface TransactionFactory { - 
TransactionPrivate createTransaction(long snapshotId); + TransactionPrivate createTransaction(); } diff --git a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java index 29c03c12..331fb033 100644 --- a/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java +++ b/dhfs-parent/objects/src/main/java/com/usatiuk/dhfs/objects/transaction/TransactionFactoryImpl.java @@ -18,24 +18,22 @@ public class TransactionFactoryImpl implements TransactionFactory { ReadTrackingObjectSourceFactory readTrackingObjectSourceFactory; @Override - public TransactionPrivate createTransaction(long snapshotId) { - Log.tracev("Trying to create transaction with snapshotId={0}", snapshotId); - return new TransactionImpl(snapshotId); + public TransactionPrivate createTransaction() { + return new TransactionImpl(); } private class TransactionImpl implements TransactionPrivate { private final ReadTrackingTransactionObjectSource _source; private final NavigableMap> _writes = new TreeMap<>(); - private long _writeVersion = 0; private Map> _newWrites = new HashMap<>(); private final List _onCommit = new ArrayList<>(); private final List _onFlush = new ArrayList<>(); private final SnapshotManager.Snapshot _snapshot; - private TransactionImpl(long snapshotId) { - _snapshot = snapshotManager.createSnapshot(snapshotId); + private TransactionImpl() { + _snapshot = snapshotManager.createSnapshot(); _source = readTrackingObjectSourceFactory.create(_snapshot); } @@ -108,12 +106,11 @@ public class TransactionFactoryImpl implements TransactionFactory { Log.tracev("Getting tx iterator with start={0}, key={1}", start, key); return new TombstoneMergingKvIterator<>("tx", start, key, (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK), t -> switch (t) { - case TxRecord.TxObjectRecordWrite write -> - new TombstoneMergingKvIterator.Data<>(write.data()); - case TxRecord.TxObjectRecordDeleted deleted -> new TombstoneMergingKvIterator.Tombstone<>(); + case TxRecord.TxObjectRecordWrite write -> new Data<>(write.data()); + case TxRecord.TxObjectRecordDeleted deleted -> new Tombstone<>(); case null, default -> null; }), - (tS, tK) -> new MappingKvIterator<>(_source.getIterator(tS, tK), TombstoneMergingKvIterator.Data::new)); + (tS, tK) -> new MappingKvIterator<>(_source.getIterator(tS, tK), Data::new)); } @Override diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java new file mode 100644 index 00000000..055f4f29 --- /dev/null +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/KeyPredicateKvIteratorTest.java @@ -0,0 +1,154 @@ +package com.usatiuk.dhfs.objects; + +import com.usatiuk.dhfs.objects.persistence.IteratorStart; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.pcollections.TreePMap; + +import java.util.List; + +public class KeyPredicateKvIteratorTest { + + @Test + public void simpleTest() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 3), + IteratorStart.GE, 3, v -> (v % 2 == 0)); + var expected = List.of(Pair.of(6, 6)); + for (var pair : expected) { + 
Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + } + + @Test + public void ltTest() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + var expected = List.of(Pair.of(6, 6)); + for (var pair : expected) { + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(pair, pit.next()); + } + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest2() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 3), + IteratorStart.LT, 2, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), + IteratorStart.LT, 4, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 5), + IteratorStart.LE, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6)); + Assertions.assertFalse(pit.hasNext()); + } + + @Test + public void ltTest3() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(7, 7).plus(8, 8); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8), + IteratorStart.LT, 8, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6), + IteratorStart.LE, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(Pair.of(6, 6), pit.next()); + 
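// Editor's note (annotation, not part of the original patch): these tests pin down the
// KeyPredicateKvIterator contract - keys failing the predicate are hidden, and the
// requested IteratorStart position is re-established on the filtered view. A minimal
// usage sketch with illustrative values:
//
//     var source = TreePMap.<Integer, Integer>empty().plus(3, 3).plus(5, 5).plus(6, 6);
//     var evens = new KeyPredicateKvIterator<>(
//             new NavigableMapKvIterator<>(source, IteratorStart.GE, 3),
//             IteratorStart.GE, 3,
//             k -> k % 2 == 0);      // only even keys are ever surfaced
//     // evens.next() yields Pair.of(6, 6): 3 and 5 are filtered out, so 6 is the
//     // first admissible key at or after the start point.
//
// This is also why the LT/LE cases above land on 6 even when the start key is smaller:
// with no admissible key below the start, the wrapper walks forward to the first match.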
Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + } + + @Test + public void itTest4() { + var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6).plus(8, 8).plus(10, 10); + var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5), + IteratorStart.LT, 5, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7), + IteratorStart.LT, 7, v -> (v % 2 == 0)); + Just.checkIterator(pit, Pair.of(6, 6), Pair.of(8, 8), Pair.of(10, 10)); + Assertions.assertFalse(pit.hasNext()); + + pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), + IteratorStart.LT, 6, v -> (v % 2 == 0)); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertFalse(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekNextKey()); + Assertions.assertEquals(Pair.of(6, 6), pit.next()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(8, pit.peekNextKey()); + Assertions.assertEquals(6, pit.peekPrevKey()); + } + +// @Test +// public void reverseTest() { +// var source1 = TreePMap.empty().plus(3, 3).plus(5, 5).plus(6, 6); +// var pit = new KeyPredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), +// IteratorStart.LT, 4, v -> (v % 2 == 0) ); +// +// } +} diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java index 63f25100..430dc635 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/MergingKvIteratorTest.java @@ -44,6 +44,27 @@ public class MergingKvIteratorTest { fillNext(); } + @Override + public K peekPrevKey() { + throw new UnsupportedOperationException(); + } + + @Override + public Pair prev() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasPrev() { + throw new UnsupportedOperationException(); + } + + @Override + public void skipPrev() { + throw new UnsupportedOperationException(); + + } + @Override public void close() { } @@ -148,7 +169,7 @@ public class MergingKvIteratorTest { Assertions.assertFalse(mergingIterator.hasNext()); Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3)); Assertions.assertFalse(mergingIterator.reversed().hasNext()); - Just.checkIterator(mergingIterator, Pair.of(1,3), Pair.of(2, 4), Pair.of(5, 6)); + Just.checkIterator(mergingIterator, Pair.of(1, 3), 
Pair.of(2, 4), Pair.of(5, 6)); Assertions.assertFalse(mergingIterator.hasNext()); @@ -161,7 +182,7 @@ public class MergingKvIteratorTest { Assertions.assertFalse(mergingIterator2.hasNext()); Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3)); Assertions.assertFalse(mergingIterator2.reversed().hasNext()); - Just.checkIterator(mergingIterator2, Pair.of(1,3), Pair.of(2, 5), Pair.of(5, 6)); + Just.checkIterator(mergingIterator2, Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6)); Assertions.assertFalse(mergingIterator2.hasNext()); var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK)); @@ -213,6 +234,9 @@ public class MergingKvIteratorTest { Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); Assertions.assertEquals(5, mergingIterator3.peekNextKey()); Assertions.assertEquals(2, mergingIterator3.peekPrevKey()); + Assertions.assertTrue(mergingIterator3.hasPrev()); + Assertions.assertTrue(mergingIterator3.hasNext()); + Assertions.assertEquals(5, mergingIterator3.peekNextKey()); } @Test diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java index 9c933417..a4bdfb57 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestExtraChecks.java @@ -5,5 +5,5 @@ import io.quarkus.test.junit.TestProfile; @QuarkusTest @TestProfile(Profiles.ObjectsTestProfileExtraChecks.class) -public class ObjectsTestExtraChecks extends ObjectsTestImpl{ +public class ObjectsTestExtraChecks extends ObjectsTestImpl { } diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java index a3c346ff..32a9ea31 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/ObjectsTestImpl.java @@ -5,7 +5,6 @@ import com.usatiuk.dhfs.objects.persistence.IteratorStart; import com.usatiuk.dhfs.objects.transaction.LockingStrategy; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.logging.Log; -import io.quarkus.test.junit.QuarkusTestProfile; import jakarta.inject.Inject; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; @@ -13,7 +12,6 @@ import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; -import org.pcollections.TreePMap; import java.util.List; import java.util.Map; @@ -24,17 +22,17 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; class Profiles { - public static class ObjectsTestProfileExtraChecks implements QuarkusTestProfile { + public static class ObjectsTestProfileExtraChecks extends TempDataProfile { @Override - final public Map getConfigOverrides() { - return TreePMap.empty().plus("dhfs.objects.persistence.snapshot-extra-checks", "true"); + protected void getConfigOverrides(Map toPut) { + toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "true"); } } - public static class ObjectsTestProfileNoExtraChecks implements QuarkusTestProfile { + public static class 
ObjectsTestProfileNoExtraChecks extends TempDataProfile { @Override - final public Map getConfigOverrides() { - return TreePMap.empty().plus("dhfs.objects.persistence.snapshot-extra-checks", "false"); + protected void getConfigOverrides(Map toPut) { + toPut.put("dhfs.objects.persistence.snapshot-extra-checks", "false"); } } } @@ -582,6 +580,7 @@ public abstract class ObjectsTestImpl { Assertions.assertEquals(key3, got.getKey().name()); got = iter.next(); Assertions.assertEquals(key4, got.getKey().name()); + iter.close(); }); } @@ -611,6 +610,18 @@ public abstract class ObjectsTestImpl { Assertions.assertEquals(key4, got.getKey().name()); } }); + txm.run(() -> { + try (var iter = curTx.getIterator(IteratorStart.LT, new JObjectKey(key + "_5"))) { + var got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasNext()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } + }); txm.run(() -> { curTx.delete(new JObjectKey(key)); curTx.delete(new JObjectKey(key1)); @@ -816,6 +827,32 @@ public abstract class ObjectsTestImpl { try { barrier.await(); barrier2.await(); + try (var iter = curTx.getIterator(IteratorStart.LE, new JObjectKey(key3))) { + var got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + Assertions.assertTrue(iter.hasNext()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key4, got.getKey().name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + Assertions.assertTrue(iter.hasPrev()); + got = iter.prev(); + Assertions.assertEquals(key1, got.getKey().name()); + Assertions.assertTrue(iter.hasNext()); + got = iter.next(); + Assertions.assertEquals(key1, got.getKey().name()); + got = iter.next(); + Assertions.assertEquals(key2, got.getKey().name()); + Assertions.assertEquals("John2", ((Parent) got.getValue()).name()); + got = iter.next(); + Assertions.assertEquals(key4, got.getKey().name()); + } try (var iter = curTx.getIterator(IteratorStart.GT, new JObjectKey(key))) { var got = iter.next(); Assertions.assertEquals(key1, got.getKey().name()); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java index 0598e61e..1bae7b0a 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PreCommitTxHookTest.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects; import com.usatiuk.dhfs.objects.data.Parent; import com.usatiuk.dhfs.objects.transaction.Transaction; import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.TestProfile; import io.quarkus.test.junit.mockito.InjectSpy; import jakarta.enterprise.context.ApplicationScoped; import jakarta.inject.Inject; @@ -12,6 +13,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; @QuarkusTest +@TestProfile(TempDataProfile.class) public class PreCommitTxHookTest { @Inject TransactionManager txm; diff --git 
a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java index 3cf41813..05ad6d4b 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/PredicateKvIteratorTest.java @@ -27,7 +27,7 @@ public class PredicateKvIteratorTest { var source1 = TreePMap.empty().plus(1, 3).plus(3, 5).plus(4, 6); var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4), IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null); - var expected = List.of(); + var expected = List.of(Pair.of(4, 6)); for (var pair : expected) { Assertions.assertTrue(pit.hasNext()); Assertions.assertEquals(pair, pit.next()); @@ -129,6 +129,11 @@ public class PredicateKvIteratorTest { IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null); Just.checkIterator(pit, Pair.of(6, 10)); Assertions.assertFalse(pit.hasNext()); + Assertions.assertTrue(pit.hasPrev()); + Assertions.assertEquals(6, pit.peekPrevKey()); + Assertions.assertEquals(Pair.of(6, 10), pit.prev()); + Assertions.assertTrue(pit.hasNext()); + Assertions.assertEquals(6, pit.peekNextKey()); pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6), IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null); diff --git a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java index e6baa8fa..483297ef 100644 --- a/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java +++ b/dhfs-parent/objects/src/test/java/com/usatiuk/dhfs/objects/persistence/LmdbKvIteratorTest.java @@ -10,18 +10,23 @@ import io.quarkus.test.junit.TestProfile; import jakarta.inject.Inject; import org.apache.commons.lang3.tuple.Pair; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.RepeatedTest; import java.util.List; +class Profiles { + public static class LmdbKvIteratorTestProfile extends TempDataProfile { + } +} + @QuarkusTest -@TestProfile(TempDataProfile.class) +@TestProfile(Profiles.LmdbKvIteratorTestProfile.class) public class LmdbKvIteratorTest { @Inject LmdbObjectPersistentStore store; - @Test + @RepeatedTest(100) public void iteratorTest1() { store.commitTx( new TxManifestRaw( @@ -29,7 +34,7 @@ public class LmdbKvIteratorTest { Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))), List.of() - ) + ), -1, Runnable::run ); var iterator = store.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3))); @@ -99,8 +104,10 @@ public class LmdbKvIteratorTest { iterator.close(); store.commitTx(new TxManifestRaw( - List.of(), - List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3))) - )); + List.of(), + List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3))) + ), + -1, Runnable::run + ); } } diff --git a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java index d997f3b8..f13f1af7 100644 --- a/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java +++ 
b/dhfs-parent/server/src/main/java/com/usatiuk/dhfs/objects/jmap/JMapIterator.java @@ -3,6 +3,7 @@ package com.usatiuk.dhfs.objects.jmap; import com.usatiuk.dhfs.objects.CloseableKvIterator; import com.usatiuk.dhfs.objects.JData; import com.usatiuk.dhfs.objects.JObjectKey; +import org.apache.commons.lang3.NotImplementedException; import org.apache.commons.lang3.tuple.Pair; public class JMapIterator> implements CloseableKvIterator> { @@ -52,6 +53,26 @@ public class JMapIterator> implements Closeabl advance(); } + @Override + public K peekPrevKey() { + throw new NotImplementedException(); + } + + @Override + public Pair> prev() { + throw new NotImplementedException(); + } + + @Override + public boolean hasPrev() { + throw new NotImplementedException(); + } + + @Override + public void skipPrev() { + throw new NotImplementedException(); + } + @Override public void close() { _backing.close();
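// Editor's note (annotation, not part of the original patch): JMapIterator is left
// forward-only here - the four reverse-direction methods required by the extended
// CloseableKvIterator interface are stubbed with NotImplementedException. A real
// implementation would have to mirror what advance() does for the forward direction:
// step _backing.prev() and re-validate that the key still belongs to the current map's
// prefix before surfacing it. Hypothetical shape (helper name illustrative):
//
//     @Override
//     public boolean hasPrev() {
//         // would need prefix-aware look-behind rather than a plain delegate, e.g.:
//         // return _backing.hasPrev() && keyBelongsToCurrentMap(_backing.peekPrevKey());
//         throw new NotImplementedException();
//     }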