Mirror of https://github.com/usatiuk/dhfs.git, synced 2025-10-28 12:37:48 +01:00

Compare commits: speed-dump ... 87c404828c (33 commits)
Commits (newest first; the scraped Author/Date columns were empty and are dropped): 87c404828c, b074e8eb44, eb5b0ae03c, c329c1f982, 4e7b13227b, db51d7280c, 70fecb389b, 6e9a2b25f6, b84ef95703, c0735801b9, b506ced9d5, 46bc9fa810, 8ab034402d, d94d11ec8b, 5beaad2d32, c4484d21e5, 2766ef1bae, 58de85c078, cc9da86440, e6c9e6aee9, 62265355c4, 854bce1627, 1b19c77bb6, 7aa968a569, e348c39be1, 1b54830651, bc5f0b816c, 9ff914bdaa, 1cee6f62b8, 81703a9406, 1757034e0b, d9765a51d8, 99ef560b95
.github/workflows/server.yml (vendored): 6 changes
@@ -21,7 +21,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          submodules: 'recursive'
+          submodules: "recursive"

       - name: Install sudo for ACT
         run: apt-get update && apt-get install -y sudo
@@ -32,7 +32,7 @@ jobs:
       - name: User allow other for fuse
         run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf

       - name: Dump fuse.conf
         run: cat /etc/fuse.conf
@@ -212,7 +212,7 @@ jobs:
         run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app

       - name: Copy DHFS
-        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
+        run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"

       - name: Copy Webui
         run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
@@ -1,17 +1,16 @@
 <component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
-    <module name="dhfs-app"/>
-    <option name="VM_PARAMETERS"
-            value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
-        <option name="ENABLED" value="true"/>
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true"/>
-    </method>
-  </configuration>
+  <configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
+    <module name="dhfs-app" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
+    <extension name="coverage">
+      <pattern>
+        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
+        <option name="ENABLED" value="true" />
+      </pattern>
+    </extension>
+    <method v="2">
+      <option name="Make" enabled="true" />
+    </method>
+  </configuration>
 </component>
@@ -1,18 +1,16 @@
 <component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
-                 nameIsGenerated="true">
-    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
-    <module name="dhfs-app"/>
-    <option name="VM_PARAMETERS"
-            value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0"/>
-    <extension name="coverage">
-      <pattern>
-        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
-        <option name="ENABLED" value="true"/>
-      </pattern>
-    </extension>
-    <method v="2">
-      <option name="Make" enabled="true"/>
-    </method>
-  </configuration>
+  <configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
+    <option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
+    <module name="dhfs-app" />
+    <option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
+    <extension name="coverage">
+      <pattern>
+        <option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
+        <option name="ENABLED" value="true" />
+      </pattern>
+    </extension>
+    <method v="2">
+      <option name="Make" enabled="true" />
+    </method>
+  </configuration>
 </component>
@@ -14,8 +14,9 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
 dhfs.fuse.debug=false
 dhfs.fuse.enabled=true
 dhfs.files.allow_recursive_delete=false
-dhfs.files.target_chunk_size=2097152
-dhfs.files.target_chunk_alignment=19
+dhfs.files.target_chunk_size=524288
+dhfs.files.max_chunk_size=524288
+dhfs.files.target_chunk_alignment=17
 dhfs.objects.deletion.delay=1000
 dhfs.objects.deletion.can-delete-retry-delay=10000
 dhfs.objects.ref_verification=true
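A note on these numbers: `target_chunk_alignment` reads as a power-of-two exponent, since the write path further down computes `int targetChunkSize = 1 << targetChunkAlignment;`. Under that reading, the new defaults shrink the aligned chunk unit while `max_chunk_size` caps how large a single chunk may grow. A small sketch of the arithmetic:

```java
// Chunk-size arithmetic implied by the new defaults (values from the properties above).
int targetChunkSize = 524288;                 // 2^19 bytes (512 KiB), down from 2097152 (2 MiB)
int maxChunkSize = 524288;                    // new cap: no chunk larger than 512 KiB
int targetChunkAlignment = 17;                // exponent, was 19
int alignedUnit = 1 << targetChunkAlignment;  // 131072 bytes (128 KiB), was 524288
```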
@@ -168,35 +168,6 @@ public class DhfsFuseIT {
                 "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
     }

-    // TODO: How this fits with the tree?
-    @Test
-    @Disabled
-    void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
-        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
-        await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
-
-        var client = DockerClientFactory.instance().client();
-        client.pauseContainerCmd(container2.getContainerId()).exec();
-
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
-
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
-
-        client.unpauseContainerCmd(container2.getContainerId()).exec();
-
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
-
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
-
-        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
-        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
-
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
-    }
-
     @Test
     void deleteTest() throws IOException, InterruptedException, TimeoutException {
         await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
@@ -221,6 +192,28 @@ public class DhfsFuseIT {
                 1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
     }

+    @Test
+    void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() ->
+                "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
+        await().atMost(45, TimeUnit.SECONDS).until(() ->
+                "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
+
+        container2.stop();
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
+
+        Log.info("Deleting");
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
+        Log.info("Deleted");
+
+        // FIXME?
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
+
+        await().atMost(45, TimeUnit.SECONDS).until(() ->
+                1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
+    }
+
     @Test
     void moveFileTest() throws IOException, InterruptedException, TimeoutException {
         Log.info("Creating");
@@ -70,6 +70,7 @@ public class DhfsImage implements Future<String> {
                 "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
                 "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
                 "--add-opens=java.base/java.nio=ALL-UNNAMED",
+                "--enable-preview",
                 "-Ddhfs.objects.peerdiscovery.interval=1s",
                 "-Ddhfs.objects.invalidation.delay=100",
                 "-Ddhfs.objects.deletion.delay=0",
@@ -78,6 +79,8 @@ public class DhfsImage implements Future<String> {
                 "-Ddhfs.objects.sync.timeout=30",
                 "-Ddhfs.objects.sync.ping.timeout=5",
                 "-Ddhfs.objects.reconnect_interval=1s",
+                "-Ddhfs.objects.last-seen.timeout=30",
+                "-Ddhfs.objects.last-seen.update=10",
                 "-Ddhfs.sync.cert-check=false",
-                "-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
+                "-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
@@ -55,6 +55,9 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     @ConfigProperty(name = "dhfs.files.target_chunk_size")
     int targetChunkSize;

+    @ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
+    int maxChunkSize;
+
     @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
     boolean useHashForChunks;
@@ -83,7 +86,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {

     private ChunkData createChunk(ByteString bytes) {
         var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
-        remoteTx.putData(newChunk);
+        remoteTx.putDataNew(newChunk);
         return newChunk;
     }
@@ -215,6 +218,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
     public void unlink(String name) {
         jObjectTxManager.executeTx(() -> {
             var node = getDirEntryOpt(name).orElse(null);
+            if (node == null)
+                throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
             if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
                 if (!allowRecursiveDelete && !node.children().isEmpty())
                     throw new DirectoryNotEmptyException();
@@ -360,16 +365,10 @@ public class DhfsFileServiceImpl implements DhfsFileService {

         var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
         if (file == null) {
-            Log.error("File not found when trying to write: " + fileUuid);
-            return -1L;
+            throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
         }

-        if (writeLogging) {
-            Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
-                    + offset + " " + data.size());
-        }
-
-        NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
+        Map<Long, JObjectKey> removedChunks = new HashMap<>();

         long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
         long writeEnd = offset + data.size();
@@ -407,7 +406,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
         }

-        NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
+        Map<Long, JObjectKey> newChunks = new HashMap<>();

         if (existingEnd < offset) {
             if (!pendingPrefix.isEmpty()) {
@@ -424,12 +423,13 @@ public class DhfsFileServiceImpl implements DhfsFileService {
         int combinedSize = pendingWrites.size();

         {
             int targetChunkSize = 1 << targetChunkAlignment;
             int cur = 0;
             while (cur < combinedSize) {
                 int end;

-                if (targetChunkAlignment < 0)
+                if (combinedSize - cur < maxChunkSize)
+                    end = combinedSize;
+                else if (targetChunkAlignment < 0)
                     end = combinedSize;
                 else
                     end = Math.min(cur + targetChunkSize, combinedSize);
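Read as a whole, the new loop stops splitting once the remaining bytes fall below `max_chunk_size`; a self-contained sketch of the boundary rule (names mirror the diff above, the surrounding chunk-creation code is omitted):

```java
// Where the next chunk boundary lands under the new rule.
static int nextChunkEnd(int cur, int combinedSize, int targetChunkAlignment, int maxChunkSize) {
    if (combinedSize - cur < maxChunkSize)
        return combinedSize; // small remainder: emit it as one final chunk
    if (targetChunkAlignment < 0)
        return combinedSize; // alignment disabled: one chunk for everything
    int targetChunkSize = 1 << targetChunkAlignment;
    return Math.min(cur + targetChunkSize, combinedSize); // aligned step
}
```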
@@ -550,7 +550,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
         });
     }

-    private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
+    private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
         long combinedSize = (length - fillStart);

         long start = fillStart;
@@ -1,12 +1,15 @@
 package com.usatiuk.dhfsfuse;

 import com.google.protobuf.UnsafeByteOperations;
+import com.kenai.jffi.MemoryIO;
 import com.sun.security.auth.module.UnixSystem;
 import com.usatiuk.dhfsfs.service.DhfsFileService;
 import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
 import com.usatiuk.dhfsfs.service.GetattrRes;
 import com.usatiuk.kleppmanntree.AlreadyExistsException;
 import com.usatiuk.objects.JObjectKey;
+import com.usatiuk.utils.UninitializedByteBuffer;
+import com.usatiuk.utils.UnsafeAccessor;
 import io.grpc.Status;
 import io.grpc.StatusRuntimeException;
 import io.quarkus.logging.Log;
@@ -17,15 +20,18 @@ import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.enterprise.event.Observes;
 import jakarta.inject.Inject;
 import jnr.ffi.Pointer;
+import jnr.ffi.Runtime;
+import jnr.ffi.Struct;
+import jnr.ffi.types.off_t;
 import jnr.ffi.types.size_t;
 import org.apache.commons.lang3.SystemUtils;
 import org.eclipse.microprofile.config.inject.ConfigProperty;
 import ru.serce.jnrfuse.ErrorCodes;
 import ru.serce.jnrfuse.FuseFillDir;
 import ru.serce.jnrfuse.FuseStubFS;
-import ru.serce.jnrfuse.struct.FileStat;
-import ru.serce.jnrfuse.struct.FuseFileInfo;
-import ru.serce.jnrfuse.struct.Statvfs;
-import ru.serce.jnrfuse.struct.Timespec;
+import ru.serce.jnrfuse.NotImplemented;
+import ru.serce.jnrfuse.flags.FuseBufFlags;
+import ru.serce.jnrfuse.struct.*;

 import java.nio.ByteBuffer;
 import java.nio.file.Paths;
@@ -51,8 +57,6 @@ public class DhfsFuse extends FuseStubFS {
     @ConfigProperty(name = "dhfs.files.target_chunk_size")
     int targetChunkSize;
-    @Inject
-    JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
     @Inject
     DhfsFileService fileService;

     private long allocateHandle(JObjectKey key) {
@@ -231,7 +235,7 @@ public class DhfsFuse extends FuseStubFS {
             var fileKey = getFromHandle(fi.fh.get());
             var read = fileService.read(fileKey, offset, (int) size);
             if (read.isEmpty()) return 0;
-            UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
+            UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
             return read.size();
         } catch (Throwable e) {
             Log.error("When reading " + path, e);
@@ -241,24 +245,22 @@ public class DhfsFuse extends FuseStubFS {

     @Override
     public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
+        var buffer = UninitializedByteBuffer.allocate((int) size);
+        UnsafeAccessor.UNSAFE.copyMemory(
+                buf.address(),
+                UnsafeAccessor.NIO.getBufferAddress(buffer),
+                size
+        );
+        return write(path, buffer, offset, fi);
+    }
+
+    public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
         if (offset < 0) return -ErrorCodes.EINVAL();
         try {
             var fileKey = getFromHandle(fi.fh.get());
-            var buffer = ByteBuffer.allocateDirect((int) size);
-
-            if (buffer.isDirect()) {
-                jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
-                        buf.address(),
-                        jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
-                        size
-                );
-            } else {
-                buf.get(0, buffer.array(), 0, (int) size);
-            }
-
             var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
             return written.intValue();
-        } catch (Throwable e) {
+        } catch (Exception e) {
             Log.error("When writing " + path, e);
             return -ErrorCodes.EIO();
         }
@@ -394,7 +396,7 @@ public class DhfsFuse extends FuseStubFS {
             var file = fileOpt.get();
             var read = fileService.readlinkBS(fileOpt.get());
             if (read.isEmpty()) return 0;
-            UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
+            UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
             buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
             return 0;
         } catch (Throwable e) {
@@ -426,4 +428,29 @@ public class DhfsFuse extends FuseStubFS {
             return -ErrorCodes.EIO();
         }
     }
+
+    @Override
+    public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
+        int size = (int) libFuse.fuse_buf_size(buf);
+        FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
+        long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
+        try {
+            tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
+            FuseBufvec.init(tmpVec, size);
+            var bb = UninitializedByteBuffer.allocate(size);
+            var mem = UninitializedByteBuffer.getAddress(bb);
+            tmpVec.buf.mem.set(mem);
+            tmpVec.buf.size.set(size);
+            int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
+            if (res != size) {
+                Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
+                return -ErrorCodes.ENOMEM();
+            }
+            return write(path, bb, off, fi);
+        } finally {
+            if (tmpVecAddr != 0) {
+                MemoryIO.getInstance().freeMemory(tmpVecAddr);
+            }
+        }
+    }
 }
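For context on `write_buf`: libfuse's `fuse_buf_copy` gathers the source vector `buf`, whose buffers may be backed by a file descriptor rather than plain memory, into the destination vector. Here `tmpVec` is set up as a single memory buffer pointing at `bb`, so the kernel-supplied data lands in the Java buffer with one copy and is then handed to the `ByteBuffer` write overload added above.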
@@ -1,6 +1,7 @@
 package com.usatiuk.dhfsfuse;

 import com.google.protobuf.ByteOutput;
+import com.usatiuk.utils.UnsafeAccessor;
 import jnr.ffi.Pointer;

 import java.nio.ByteBuffer;
@@ -9,14 +10,12 @@ import java.nio.MappedByteBuffer;
 public class JnrPtrByteOutput extends ByteOutput {
     private final Pointer _backing;
     private final long _size;
-    private final JnrPtrByteOutputAccessors _accessors;
     private long _pos;

-    public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
+    public JnrPtrByteOutput(Pointer backing, long size) {
         _backing = backing;
         _size = size;
         _pos = 0;
-        _accessors = accessors;
     }

     @Override
@@ -47,9 +46,9 @@ public class JnrPtrByteOutput extends ByteOutput {
         if (value instanceof MappedByteBuffer mb) {
             mb.load();
         }
-        long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
+        long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
         var out = _backing.address() + _pos;
-        _accessors.getUnsafe().copyMemory(addr, out, rem);
+        UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
     } else {
         _backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
     }
@@ -1,29 +0,0 @@
-package com.usatiuk.dhfsfuse;
-
-import jakarta.inject.Singleton;
-import jdk.internal.access.JavaNioAccess;
-import jdk.internal.access.SharedSecrets;
-import sun.misc.Unsafe;
-
-import java.lang.reflect.Field;
-
-@Singleton
-class JnrPtrByteOutputAccessors {
-    JavaNioAccess _nioAccess;
-    Unsafe _unsafe;
-
-    JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
-        _nioAccess = SharedSecrets.getJavaNioAccess();
-        Field f = Unsafe.class.getDeclaredField("theUnsafe");
-        f.setAccessible(true);
-        _unsafe = (Unsafe) f.get(null);
-    }
-
-    public JavaNioAccess getNioAccess() {
-        return _nioAccess;
-    }
-
-    public Unsafe getUnsafe() {
-        return _unsafe;
-    }
-}
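The deleted singleton's job moves to static access. From the call sites visible in these diffs (`UnsafeAccessor.UNSAFE.copyMemory`, `UnsafeAccessor.NIO.getBufferAddress`), the replacement utility plausibly looks like the holder below; this is a sketch inferred from those call sites, not the repository's actual file, and it needs the same `--add-exports java.base/jdk.internal.access=ALL-UNNAMED` flag the run configurations above already pass:

```java
package com.usatiuk.utils;

import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import sun.misc.Unsafe;

import java.lang.reflect.Field;

// Hypothetical shape of UnsafeAccessor, inferred from its call sites.
public final class UnsafeAccessor {
    public static final Unsafe UNSAFE;
    public static final JavaNioAccess NIO;

    static {
        try {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            UNSAFE = (Unsafe) f.get(null);
            NIO = SharedSecrets.getJavaNioAccess();
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    private UnsafeAccessor() {
    }
}
```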
@@ -8,11 +8,10 @@ import jakarta.inject.Singleton;
 import java.nio.ByteBuffer;

 @Singleton
-public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
+public class JDataVersionedWrapperSerializer {
     @Inject
     ObjectSerializer<JData> dataSerializer;

-    @Override
     public ByteString serialize(JDataVersionedWrapper obj) {
         ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
         buffer.putLong(obj.version());
@@ -20,12 +19,10 @@ public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
         return ByteString.copyFrom(buffer).concat(dataSerializer.serialize(obj.data()));
     }

-    @Override
-    public JDataVersionedWrapper deserialize(ByteString data) {
-        var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
-        var rawData = data.substring(Long.BYTES);
-        return new JDataVersionedWrapperLazy(version, rawData.size(),
-                () -> dataSerializer.deserialize(rawData)
+    public JDataVersionedWrapper deserialize(ByteBuffer data) {
+        var version = data.getLong();
+        return new JDataVersionedWrapperLazy(version, data.remaining(),
+                () -> dataSerializer.deserialize(data)
         );
     }
 }
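The wire layout is unchanged by this rewrite: an 8-byte big-endian version header followed by the serialized payload. The `ByteBuffer` variant simply consumes the header in place, so the lazy deserializer sees only the payload bytes. A self-contained sketch of that framing (illustrative names, not repo code):

```java
import java.nio.ByteBuffer;

// [ 8-byte version | payload ]: getLong() advances past the header,
// leaving the remaining bytes as the payload view.
record Framed(long version, ByteBuffer payload) {
    static Framed parse(ByteBuffer data) {
        long version = data.getLong();
        return new Framed(version, data.slice());
    }
}
```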
@@ -1,5 +1,7 @@
 package com.usatiuk.objects;

+import com.usatiuk.utils.UninitializedByteBuffer;
+
 import java.io.Serial;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
@@ -46,7 +48,7 @@ public final class JObjectKeyImpl implements JObjectKey {
         synchronized (this) {
             if (_bb != null) return _bb;
             var bytes = value.getBytes(StandardCharsets.ISO_8859_1);
-            var directBb = ByteBuffer.allocateDirect(bytes.length);
+            var directBb = UninitializedByteBuffer.allocate(bytes.length);
             directBb.put(bytes);
             directBb.flip();
             _bb = directBb;
@@ -69,7 +71,7 @@ public final class JObjectKeyImpl implements JObjectKey {

     @Override
     public int hashCode() {
-        return Objects.hash(value);
+        return value.hashCode();
     }

 }
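This hash change is behavior-preserving in the way that matters: for a single argument, `Objects.hash(x)` computes `31 + x.hashCode()` but allocates a varargs array on every call, so delegating straight to `value.hashCode()` gives an equally stable hash without the allocation (the constant offset is not something callers may rely on):

```java
import java.util.Objects;

class HashDemo {
    public static void main(String[] args) {
        String value = "obj-1";
        // Objects.hash allocates an Object[] and returns 31 + value.hashCode().
        System.out.println(Objects.hash(value) == 31 + value.hashCode()); // true
        // The diff switches to the allocation-free direct call:
        System.out.println(value.hashCode());
    }
}
```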
@@ -2,11 +2,13 @@ package com.usatiuk.objects;

 import com.google.protobuf.ByteString;
+import com.google.protobuf.UnsafeByteOperations;
 import com.usatiuk.utils.SerializationHelper;
 import io.quarkus.arc.DefaultBean;
 import jakarta.enterprise.context.ApplicationScoped;

 import java.io.IOException;
+import java.nio.ByteBuffer;

 @ApplicationScoped
 @DefaultBean
@@ -16,9 +18,8 @@ public class JavaDataSerializer implements ObjectSerializer<JData> {
         return SerializationHelper.serialize(obj);
     }

-    @Override
-    public JData deserialize(ByteString data) {
-        try (var is = data.newInput()) {
+    public JData deserialize(ByteBuffer data) {
+        try (var is = UnsafeByteOperations.unsafeWrap(data).newInput()) {
             return SerializationHelper.deserialize(is);
         } catch (IOException e) {
             throw new RuntimeException(e);
@@ -2,8 +2,10 @@ package com.usatiuk.objects;

 import com.google.protobuf.ByteString;

+import java.nio.ByteBuffer;
+
 public interface ObjectSerializer<T> {
     ByteString serialize(T obj);

-    T deserialize(ByteString data);
+    T deserialize(ByteBuffer data);
 }
@@ -1,6 +0,0 @@
-package com.usatiuk.objects.iterators;
-
-@FunctionalInterface
-public interface IterProdFn<K extends Comparable<K>, V> {
-    CloseableKvIterator<K, V> get(IteratorStart start, K key);
-}
@@ -11,17 +11,15 @@ import java.util.TreeMap;

 public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
-    private final NavigableMap<K, IteratorEntry<K, V>> _sortedIterators = new TreeMap<>();
-    private final String _name;
     private final List<IteratorEntry<K, V>> _iterators;

-    public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
-        _goingForward = true;
-        _name = name;
-
+    // Why streams are so slow?
+    public MergingKvIterator(IteratorStart startType, K startKey, List<CloseableKvIterator<K, V>> iterators) {
+        _goingForward = true;

         {
             IteratorEntry<K, V>[] iteratorEntries = new IteratorEntry[iterators.size()];
             for (int i = 0; i < iterators.size(); i++) {
-                iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i).get(startType, startKey));
+                iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i));
             }
             _iterators = List.of(iteratorEntries);
         }
@@ -91,8 +89,8 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     }

     @SafeVarargs
-    public MergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
-        this(name, startType, startKey, List.of(iterators));
+    public MergingKvIterator(IteratorStart startType, K startKey, CloseableKvIterator<K, V>... iterators) {
+        this(startType, startKey, List.of(iterators));
     }

     private void advanceIterator(IteratorEntry<K, V> iteratorEntry) {
@@ -151,7 +149,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
                 || (!_goingForward && peekImpl().compareTo(cur.getKey()) >= 0))) {
             skipImpl();
         }
-        Log.tracev("{0} Reversed to {1}", _name, _sortedIterators);
     }

     @Override
@@ -199,28 +196,14 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
     @Override
     public String toString() {
         return "MergingKvIterator{" +
-                "_name='" + _name + '\'' +
-                ", _sortedIterators=" + _sortedIterators.keySet() +
                 ", _iterators=" + _iterators +
                 '}';
     }

-    private interface FirstMatchState<K extends Comparable<K>, V> {
-    }
-
     private record IteratorEntry<K extends Comparable<K>, V>(int priority, CloseableKvIterator<K, V> iterator) {
         public IteratorEntry<K, V> reversed() {
             return new IteratorEntry<>(priority, iterator.reversed());
         }
     }
-
-    private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
-    }
-
-    private record FirstMatchFound<K extends Comparable<K>, V>(
-            CloseableKvIterator<K, V> iterator) implements FirstMatchState<K, V> {
-    }
-
-    private record FirstMatchConsumed<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
-    }
 }
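The constructor change inverts responsibility: callers used to pass `IterProdFn` factories that `MergingKvIterator` itself invoked with `(startType, startKey)`; now they position each source iterator themselves and pass it in directly, which is what lets `Snapshot.getIterator` (changed below) return plain lists of layer iterators. Call-site shape after the change, as a sketch (`cacheIter` and `backingIter` stand in for already-positioned layer iterators):

```java
// Hypothetical call site; the list index becomes the IteratorEntry priority.
var merged = new MergingKvIterator<>(startType, startKey, List.of(cacheIter, backingIter));
```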
@@ -1,22 +0,0 @@
-package com.usatiuk.objects.iterators;
-
-import java.util.List;
-
-public abstract class TombstoneMergingKvIterator {
-    public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
-        return new PredicateKvIterator<K, MaybeTombstone<V>, V>(
-                new MergingKvIterator<K, MaybeTombstone<V>>(name + "-merging", startType, startKey, iterators),
-                startType, startKey,
-                pair -> {
-                    // Log.tracev("{0} - Processing pair {1}", name, pair);
-                    if (pair instanceof Tombstone<V>) {
-                        return null;
-                    }
-                    return ((Data<V>) pair).value();
-                });
-    }
-
-    public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
-        return of(name, startType, startKey, List.of(iterators));
-    }
-}
@@ -2,26 +2,26 @@ package com.usatiuk.objects.iterators;

 import org.apache.commons.lang3.tuple.Pair;

+import java.util.List;
 import java.util.NoSuchElementException;
-import java.util.function.Function;

-public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
-    private final CloseableKvIterator<K, V> _backing;
-    private final Function<V, V_T> _transformer;
-    private Pair<K, V_T> _next = null;
+public class TombstoneSkippingIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
+    private final MergingKvIterator<K, MaybeTombstone<V>> _backing;
+    private Pair<K, V> _next = null;
     private boolean _checkedNext = false;

-    public PredicateKvIterator(CloseableKvIterator<K, V> backing, IteratorStart start, K startKey, Function<V, V_T> transformer) {
+    public TombstoneSkippingIterator(IteratorStart start, K startKey, List<CloseableKvIterator<K, MaybeTombstone<V>>> iterators) {
         _goingForward = true;
-        _backing = backing;
-        _transformer = transformer;
+        _backing = new MergingKvIterator<>(start, startKey, iterators);

         if (start == IteratorStart.GE || start == IteratorStart.GT)
             return;

-        fillNext();
-
         boolean shouldGoBack = false;
+        if (canHaveNext())
+            tryFillNext();

         if (start == IteratorStart.LE) {
             if (_next == null || _next.getKey().compareTo(startKey) > 0) {
                 shouldGoBack = true;
@@ -38,34 +38,27 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
                 _backing.skipPrev();
-            fillNext();
             _goingForward = true;
-            _backing.skip();
+            if (_next != null)
+                _backing.skip();
+            fillNext();
         }
     }

+    private boolean canHaveNext() {
+        return (_goingForward ? _backing.hasNext() : _backing.hasPrev());
+    }
+
-//        switch (start) {
-//            case LT -> {
-////                assert _next == null || _next.getKey().compareTo(startKey) < 0;
-//            }
-//            case LE -> {
-////                assert _next == null || _next.getKey().compareTo(startKey) <= 0;
-//            }
-//            case GT -> {
-//                assert _next == null || _next.getKey().compareTo(startKey) > 0;
-//            }
-//            case GE -> {
-//                assert _next == null || _next.getKey().compareTo(startKey) >= 0;
-//            }
-//        }
+    private boolean tryFillNext() {
+        var next = _goingForward ? _backing.next() : _backing.prev();
+        if (next.getValue() instanceof Tombstone<?>)
+            return false;
+        _next = Pair.of(next.getKey(), ((Data<V>) next.getValue()).value());
+        return true;
+    }

     private void fillNext() {
-        while ((_goingForward ? _backing.hasNext() : _backing.hasPrev()) && _next == null) {
-            var next = _goingForward ? _backing.next() : _backing.prev();
-            var transformed = _transformer.apply(next.getValue());
-            if (transformed == null)
-                continue;
-            _next = Pair.of(next.getKey(), transformed);
+        while (_next == null && canHaveNext()) {
+            tryFillNext();
         }
         _checkedNext = true;
     }
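What replaces `PredicateKvIterator` is deliberately narrower: the pluggable transformer is gone and the tombstone rule is hard-coded. A self-contained model of the observable semantics (eager over maps, unlike the project's lazy iterator; names are illustrative):

```java
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

// Merge key-ordered layers (index 0 = highest priority) and drop keys whose
// winning entry is a tombstone; this models what TombstoneSkippingIterator
// streams lazily.
class TombstoneModel {
    sealed interface MaybeTombstone<V> permits Data, Tombstone {
    }

    record Data<V>(V value) implements MaybeTombstone<V> {
    }

    record Tombstone<V>() implements MaybeTombstone<V> {
    }

    static <K extends Comparable<K>, V> NavigableMap<K, V> merge(
            List<NavigableMap<K, MaybeTombstone<V>>> layers) {
        NavigableMap<K, MaybeTombstone<V>> winning = new TreeMap<>();
        for (int i = layers.size() - 1; i >= 0; i--)
            winning.putAll(layers.get(i)); // higher-priority layers overwrite
        NavigableMap<K, V> out = new TreeMap<>();
        winning.forEach((k, v) -> {
            if (v instanceof Data<V> d)
                out.put(k, d.value()); // tombstones suppress the key entirely
        });
        return out;
    }
}
```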
@@ -80,9 +73,6 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
         else if (!_goingForward && !wasAtEnd)
             _backing.skipPrev();

-//        if (!wasAtEnd)
-//            Log.tracev("Skipped in reverse: {0}", _next);
-
         _next = null;
         _checkedNext = false;
     }
@@ -117,7 +107,7 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
     }

     @Override
-    protected Pair<K, V_T> nextImpl() {
+    protected Pair<K, V> nextImpl() {
         if (!_checkedNext)
             fillNext();
@@ -137,7 +127,6 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends ReversibleKvIterator<K, V_T> {
     @Override
     public String toString() {
         return "PredicateKvIterator{" +
                 "_backing=" + _backing +
                 ", _next=" + _next +
                 '}';
     }
@@ -2,13 +2,17 @@ package com.usatiuk.objects.snapshot;

 import com.usatiuk.objects.iterators.CloseableKvIterator;
 import com.usatiuk.objects.iterators.IteratorStart;
+import com.usatiuk.objects.iterators.MaybeTombstone;
+import com.usatiuk.objects.iterators.Tombstone;
 import com.usatiuk.utils.AutoCloseableNoThrow;

 import javax.annotation.Nonnull;
+import java.util.List;
 import java.util.Optional;
+import java.util.stream.Stream;

 public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
-    CloseableKvIterator<K, V> getIterator(IteratorStart start, K key);
+    List<CloseableKvIterator<K, MaybeTombstone<V>>> getIterator(IteratorStart start, K key);

     @Nonnull
     Optional<V> readObject(K name);
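This interface change is the pivot of the whole iterator refactor: a `Snapshot` no longer merges its own layers into a single iterator. It exposes them as a list of raw `MaybeTombstone` iterators, and the caller merges every layer from every store once, at the top, with the `TombstoneSkippingIterator` introduced above. The store changes that follow are mechanical consequences of this signature.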
@@ -5,6 +5,7 @@ import com.usatiuk.objects.JDataVersionedWrapperLazy;
 import com.usatiuk.objects.JObjectKey;
 import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
+import com.usatiuk.utils.ListUtils;
 import io.quarkus.logging.Log;
 import io.quarkus.runtime.StartupEvent;
 import jakarta.annotation.Priority;
@@ -16,6 +17,7 @@ import org.eclipse.microprofile.config.inject.ConfigProperty;
 import org.pcollections.TreePMap;

 import javax.annotation.Nonnull;
+import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -33,6 +35,7 @@ public class CachingObjectPersistentStore {
     private ExecutorService _statusExecutor;
     private AtomicLong _cached = new AtomicLong();
+    private AtomicLong _cacheTries = new AtomicLong();

     public CachingObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.lru.limit") int sizeLimit) {
         _cache = new AtomicReference<>(
                 new Cache(TreePMap.empty(), 0, -1, sizeLimit)
@@ -150,10 +153,12 @@ public class CachingObjectPersistentStore {
     }

     @Override
-    public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
-        return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("cache", start, key,
-                (mS, mK) -> new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), mS, mK),
-                (mS, mK) -> new CachingKvIterator(_backing.getIterator(start, key)));
+    public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
+        return ListUtils.prependAndMap(
+                new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), start, key),
+                _backing.getIterator(start, key),
+                i -> new CachingKvIterator((CloseableKvIterator<JObjectKey, JDataVersionedWrapper>) (CloseableKvIterator<JObjectKey, ?>) i)
+        );
     }

     @Nonnull
@@ -1,14 +1,9 @@
 package com.usatiuk.objects.stores;

 import com.google.protobuf.ByteString;
-import com.google.protobuf.UnsafeByteOperations;
 import com.usatiuk.objects.JObjectKey;
 import com.usatiuk.objects.JObjectKeyMax;
 import com.usatiuk.objects.JObjectKeyMin;
-import com.usatiuk.objects.iterators.CloseableKvIterator;
-import com.usatiuk.objects.iterators.IteratorStart;
-import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
-import com.usatiuk.objects.iterators.ReversibleKvIterator;
+import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
 import io.quarkus.arc.properties.IfBuildProperty;
 import io.quarkus.logging.Log;
@@ -28,8 +23,10 @@ import java.lang.ref.Cleaner;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
+import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.Optional;
+import java.util.stream.Stream;

 import static org.lmdbjava.DbiFlags.MDB_CREATE;
 import static org.lmdbjava.Env.create;
@@ -104,27 +101,27 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
     }

     @Override
-    public Snapshot<JObjectKey, ByteString> getSnapshot() {
+    public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
         var txn = _env.txnRead();
         try {
             long commitId = readTxId(txn).orElseThrow();
-            return new Snapshot<JObjectKey, ByteString>() {
+            return new Snapshot<JObjectKey, ByteBuffer>() {
                 private final Txn<ByteBuffer> _txn = txn;
                 private final long _id = commitId;
                 private boolean _closed = false;

                 @Override
-                public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
+                public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
                     assert !_closed;
-                    return new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR));
+                    return List.of(new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn, start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR)));
                 }

                 @Nonnull
                 @Override
-                public Optional<ByteString> readObject(JObjectKey name) {
+                public Optional<ByteBuffer> readObject(JObjectKey name) {
                     assert !_closed;
                     var got = _db.get(_txn, name.toByteBuffer());
-                    var ret = Optional.ofNullable(got).map(UnsafeByteOperations::unsafeWrap);
+                    var ret = Optional.ofNullable(got).map(ByteBuffer::asReadOnlyBuffer);
                     return ret;
                 }
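Both read paths stay zero-copy: lmdbjava hands back `ByteBuffer`s that view the memory-mapped value directly (valid while the snapshot's read transaction is open). Returning `asReadOnlyBuffer()` keeps that raw view and feeds the new `deserialize(ByteBuffer)` path directly, instead of first wrapping the buffer in a protobuf `ByteString` as the old code did.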
@@ -197,7 +194,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
         return _root.toFile().getUsableSpace();
     }

-    private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, ByteString> {
+    private class LmdbKvIterator extends ReversibleKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>> {
         private static final Cleaner CLEANER = Cleaner.create();
         private final Txn<ByteBuffer> _txn; // Managed by the snapshot
         private final Cursor<ByteBuffer> _cursor;
@@ -352,14 +349,13 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
         }

         @Override
-        protected Pair<JObjectKey, ByteString> nextImpl() {
+        protected Pair<JObjectKey, MaybeTombstone<ByteBuffer>> nextImpl() {
             if (!_hasNext) {
                 throw new NoSuchElementException("No more elements");
             }
-            // TODO: Right now with java serialization it doesn't matter, it's all copied to arrays anyway
             var val = _cursor.val();
-            var bs = UnsafeByteOperations.unsafeWrap(val);
-            var ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), bs);
+            Pair<JObjectKey, MaybeTombstone<ByteBuffer>> ret = Pair.of(JObjectKey.fromByteBuffer(_cursor.key()), new DataWrapper<>(val.asReadOnlyBuffer()));
             if (_goingForward)
                 _hasNext = _cursor.next();
             else
@@ -2,17 +2,18 @@ package com.usatiuk.objects.stores;

 import com.google.protobuf.ByteString;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.iterators.CloseableKvIterator;
-import com.usatiuk.objects.iterators.IteratorStart;
-import com.usatiuk.objects.iterators.NavigableMapKvIterator;
+import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
 import io.quarkus.arc.properties.IfBuildProperty;
 import jakarta.enterprise.context.ApplicationScoped;
 import org.pcollections.TreePMap;

 import javax.annotation.Nonnull;
+import java.nio.ByteBuffer;
+import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Stream;

 @ApplicationScoped
 @IfBuildProperty(name = "dhfs.objects.persistence", stringValue = "memory")
@@ -22,21 +23,21 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
     private long _lastCommitId = 0;

     @Override
-    public Snapshot<JObjectKey, ByteString> getSnapshot() {
+    public Snapshot<JObjectKey, ByteBuffer> getSnapshot() {
         synchronized (this) {
-            return new Snapshot<JObjectKey, ByteString>() {
+            return new Snapshot<JObjectKey, ByteBuffer>() {
                 private final TreePMap<JObjectKey, ByteString> _objects = MemoryObjectPersistentStore.this._objects;
                 private final long _lastCommitId = MemoryObjectPersistentStore.this._lastCommitId;

                 @Override
-                public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
-                    return new NavigableMapKvIterator<>(_objects, start, key);
+                public List<CloseableKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>>> getIterator(IteratorStart start, JObjectKey key) {
+                    return List.of(new MappingKvIterator<>(new NavigableMapKvIterator<>(_objects, start, key), s -> new DataWrapper<>(s.asReadOnlyByteBuffer())));
                 }

                 @Nonnull
                 @Override
-                public Optional<ByteString> readObject(JObjectKey name) {
-                    return Optional.ofNullable(_objects.get(name));
+                public Optional<ByteBuffer> readObject(JObjectKey name) {
+                    return Optional.ofNullable(_objects.get(name)).map(ByteString::asReadOnlyByteBuffer);
                 }

                 @Override
@@ -5,12 +5,13 @@ import com.usatiuk.objects.JObjectKey;
 import com.usatiuk.objects.snapshot.Snapshot;

 import javax.annotation.Nonnull;
+import java.nio.ByteBuffer;
 import java.util.Optional;

 // Persistent storage of objects
 // All changes are written as sequential transactions
 public interface ObjectPersistentStore {
-    Snapshot<JObjectKey, ByteString> getSnapshot();
+    Snapshot<JObjectKey, ByteBuffer> getSnapshot();

     Runnable prepareTx(TxManifestRaw names, long txId);
@@ -1,35 +1,38 @@
 package com.usatiuk.objects.stores;

 import com.google.protobuf.ByteString;
 import com.usatiuk.objects.JDataVersionedWrapper;
+import com.usatiuk.objects.JDataVersionedWrapperSerializer;
 import com.usatiuk.objects.JObjectKey;
 import com.usatiuk.objects.ObjectSerializer;
-import com.usatiuk.objects.iterators.CloseableKvIterator;
-import com.usatiuk.objects.iterators.IteratorStart;
-import com.usatiuk.objects.iterators.MappingKvIterator;
+import com.usatiuk.objects.iterators.*;
 import com.usatiuk.objects.snapshot.Snapshot;
+import com.usatiuk.utils.ListUtils;
 import jakarta.enterprise.context.ApplicationScoped;
 import jakarta.inject.Inject;
 import org.apache.commons.lang3.tuple.Pair;

 import javax.annotation.Nonnull;
+import java.nio.ByteBuffer;
+import java.util.List;
 import java.util.Optional;
+import java.util.stream.Stream;

 @ApplicationScoped
 public class SerializingObjectPersistentStore {
     @Inject
-    ObjectSerializer<JDataVersionedWrapper> serializer;
+    JDataVersionedWrapperSerializer serializer;

     @Inject
     ObjectPersistentStore delegateStore;

     public Snapshot<JObjectKey, JDataVersionedWrapper> getSnapshot() {
         return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
-            private final Snapshot<JObjectKey, ByteString> _backing = delegateStore.getSnapshot();
+            private final Snapshot<JObjectKey, ByteBuffer> _backing = delegateStore.getSnapshot();

             @Override
-            public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
-                return new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
+            public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
+                return ListUtils.map(_backing.getIterator(start, key),
+                        i -> new MappingKvIterator<JObjectKey, MaybeTombstone<ByteBuffer>, MaybeTombstone<JDataVersionedWrapper>>(i,
+                                d -> serializer.deserialize(((DataWrapper<ByteBuffer>) d).value())));
             }

             @Nonnull
@@ -3,10 +3,13 @@ package com.usatiuk.objects.stores;
 import com.usatiuk.objects.JDataVersionedWrapper;
 import com.usatiuk.objects.JDataVersionedWrapperImpl;
 import com.usatiuk.objects.JObjectKey;
-import com.usatiuk.objects.iterators.*;
+import com.usatiuk.objects.iterators.CloseableKvIterator;
+import com.usatiuk.objects.iterators.IteratorStart;
+import com.usatiuk.objects.iterators.MaybeTombstone;
+import com.usatiuk.objects.iterators.NavigableMapKvIterator;
 import com.usatiuk.objects.snapshot.Snapshot;
 import com.usatiuk.objects.transaction.TxCommitException;
 import com.usatiuk.objects.transaction.TxRecord;
+import com.usatiuk.utils.ListUtils;
 import io.quarkus.logging.Log;
 import io.quarkus.runtime.ShutdownEvent;
 import io.quarkus.runtime.StartupEvent;
@@ -26,24 +29,38 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;

 @ApplicationScoped
 public class WritebackObjectPersistentStore {
-    private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
-    private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
-    private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
-    private final Object _flushWaitSynchronizer = new Object();
-    private final AtomicLong _lastWrittenId = new AtomicLong(-1);
-    private final AtomicLong _lastCommittedId = new AtomicLong();
-    private final AtomicLong _waitedTotal = new AtomicLong(0);
     @Inject
     CachingObjectPersistentStore cachedStore;
+    @Inject
+    ExecutorService _callbackExecutor;

     @ConfigProperty(name = "dhfs.objects.writeback.limit")
-    long sizeLimit;
-    private long currentSize = 0;
+    int sizeLimit;
+
+    private TxBundle _pendingBundle = null;
+    private int _curSize = 0;
+
+    private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
+
+    private final ReentrantLock _pendingBundleLock = new ReentrantLock();
+
+    private final Condition _newBundleCondition = _pendingBundleLock.newCondition();
+    private final Condition _flushCondition = _pendingBundleLock.newCondition();
+
+    private final AtomicLong _lastFlushedId = new AtomicLong(-1);
+    private final AtomicLong _lastCommittedId = new AtomicLong(-1);
+
+    private final AtomicLong _waitedTotal = new AtomicLong(0);

     private ExecutorService _writebackExecutor;
     private ExecutorService _statusExecutor;

     private volatile boolean _ready = false;

     void init(@Observes @Priority(120) StartupEvent event) {
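The field changes above encode the new design: instead of a queue of bundles plus a separate not-flushed map, there is at most one pending `TxBundle` that committers merge into, guarded by one lock and two conditions. A runnable model of that handoff (heavily simplified; the real code also tracks sizes, ids, and the pending-writes map):

```java
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Single-slot producer/consumer: commitBundle() publishes into one slot,
// the writeback thread drains it; flush-side signaling is omitted here.
class SingleSlotWriteback<T> {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition newBundle = lock.newCondition();
    private T pending; // at most one bundle; committers coalesce into it

    void publish(T bundle) {
        lock.lock();
        try {
            pending = bundle;
            newBundle.signal(); // wake the single writeback thread
        } finally {
            lock.unlock();
        }
    }

    T take() throws InterruptedException {
        lock.lock();
        try {
            while (pending == null)
                newBundle.await();
            T b = pending;
            pending = null; // slot stays empty until the next commit
            return b;
        } finally {
            lock.unlock();
        }
    }
}
```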
@@ -61,8 +78,8 @@ public class WritebackObjectPersistentStore {
                 try {
                     while (true) {
                         Thread.sleep(1000);
-                        if (currentSize > 0)
-                            Log.info("Tx commit status: size=" + currentSize / 1024 / 1024 + "MB");
+                        if (_curSize > 0)
+                            Log.info("Tx commit status: size=" + _curSize / 1024 / 1024 + "MB");
                     }
                 } catch (InterruptedException ignored) {
                 }
@@ -72,7 +89,7 @@ public class WritebackObjectPersistentStore {
             lastTxId = s.id();
         }
         _lastCommittedId.set(lastTxId);
-        _lastWrittenId.set(lastTxId);
+        _lastFlushedId.set(lastTxId);
         _pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
         _ready = true;
     }
@@ -80,11 +97,14 @@ public class WritebackObjectPersistentStore {
     void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
         Log.info("Waiting for all transactions to drain");

-        synchronized (_flushWaitSynchronizer) {
-            _ready = false;
-            while (currentSize > 0) {
-                _flushWaitSynchronizer.wait();
+        _ready = false;
+        _pendingBundleLock.lock();
+        try {
+            while (_curSize > 0) {
+                _flushCondition.await();
             }
+        } finally {
+            _pendingBundleLock.unlock();
         }

         _writebackExecutor.shutdownNow();
@@ -98,21 +118,19 @@ public class WritebackObjectPersistentStore {
     private void writeback() {
         while (!Thread.interrupted()) {
             try {
-                TxBundle bundle = new TxBundle(0);
-                synchronized (_pendingBundles) {
-                    while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
-                        _pendingBundles.wait();
+                TxBundle bundle;
+                _pendingBundleLock.lock();
+                try {
+                    while (_pendingBundle == null)
+                        _newBundleCondition.await();
+                    bundle = _pendingBundle;
+                    _pendingBundle = null;

-                    long diff = 0;
-                    while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
-                        var toCompress = _pendingBundles.poll();
-                        diff -= toCompress.size();
-                        bundle.compress(toCompress);
-                    }
-                    diff += bundle.size();
-                    synchronized (_flushWaitSynchronizer) {
-                        currentSize += diff;
-                    }
+                    _curSize -= bundle.size();
+                    assert _curSize == 0;
+                    _flushCondition.signal();
+                } finally {
+                    _pendingBundleLock.unlock();
                 }

                 var toWrite = new ArrayList<Pair<JObjectKey, JDataVersionedWrapper>>();
@@ -132,15 +150,12 @@ public class WritebackObjectPersistentStore {
                     }
                 }

-                cachedStore.commitTx(
-                        new TxManifestObj<>(
-                                Collections.unmodifiableList(toWrite),
-                                Collections.unmodifiableList(toDelete)
-                        ), bundle.id());
+                cachedStore.commitTx(new TxManifestObj<>(toWrite, toDelete), bundle.id());

                 Log.tracev("Bundle {0} committed", bundle.id());

-                while (true) {
+                _pendingBundleLock.lock();
+                try {
                     var curPw = _pendingWrites.get();
                     var curPwMap = curPw.pendingWrites();
                     for (var e : bundle._entries.values()) {
@@ -153,25 +168,16 @@ public class WritebackObjectPersistentStore {
                             bundle.id(),
                             curPw.lastCommittedId()
                     );
-                    if (_pendingWrites.compareAndSet(curPw, newCurPw))
-                        break;
+                    _pendingWrites.compareAndSet(curPw, newCurPw);
+                } finally {
+                    _pendingBundleLock.unlock();
                 }

-                List<List<Runnable>> callbacks = new ArrayList<>();
-                synchronized (_notFlushedBundles) {
-                    _lastWrittenId.set(bundle.id());
-                    while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.id()) {
-                        callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
-                    }
-                }
-                callbacks.forEach(l -> l.forEach(Runnable::run));
-
-                synchronized (_flushWaitSynchronizer) {
-                    currentSize -= bundle.size();
-                    // FIXME:
-                    if (currentSize <= sizeLimit || !_ready)
-                        _flushWaitSynchronizer.notifyAll();
-                }
+                _lastFlushedId.set(bundle.id());
+                var callbacks = bundle.callbacks();
+                _callbackExecutor.submit(() -> {
+                    callbacks.forEach(Runnable::run);
+                });
             } catch (InterruptedException ignored) {
             } catch (Exception e) {
                 Log.error("Uncaught exception in writeback", e);
@@ -184,123 +190,97 @@ public class WritebackObjectPersistentStore {
|
||||
|
||||
public long commitBundle(Collection<TxRecord.TxObjectRecord<?>> writes) {
verifyReady();
boolean wait = false;
while (true) {
if (wait) {
synchronized (_flushWaitSynchronizer) {
long started = System.currentTimeMillis();
while (currentSize > sizeLimit) {
try {
_flushWaitSynchronizer.wait();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
wait = false;
}
_pendingBundleLock.lock();
try {
boolean shouldWake = false;
if (_curSize > sizeLimit) {
shouldWake = true;
long started = System.currentTimeMillis();
while (_curSize > sizeLimit)
_flushCondition.await();
long waited = System.currentTimeMillis() - started;
_waitedTotal.addAndGet(waited);
if (Log.isTraceEnabled())
Log.tracev("Thread {0} waited for tx bundle for {1} ms", Thread.currentThread().getName(), waited);
}

synchronized (_pendingBundles) {
synchronized (_flushWaitSynchronizer) {
if (currentSize > sizeLimit) {
if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var target = _pendingBundles.poll();
var oursId = _lastCommittedId.incrementAndGet();

long diff = -target.size();
while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
var toCompress = _pendingBundles.poll();
diff -= toCompress.size();
target.compress(toCompress);
}
diff += target.size();
currentSize += diff;
_pendingBundles.addFirst(target);
}
var curBundle = _pendingBundle;
int oldSize = 0;
if (curBundle != null) {
oldSize = curBundle.size();
curBundle.setId(oursId);
} else {
curBundle = new TxBundle(oursId);
}

var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();

for (var action : writes) {
var key = action.key();
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
// Log.tracev("Flushing object {0}", write.key());
var wrapper = new JDataVersionedWrapperImpl(write.data(), oursId);
curPwMap = curPwMap.plus(key, new PendingWrite(wrapper, oursId));
curBundle.commit(wrapper);
}

if (currentSize > sizeLimit) {
wait = true;
continue;
case TxRecord.TxObjectRecordDeleted deleted -> {
// Log.tracev("Deleting object {0}", deleted.key());
curPwMap = curPwMap.plus(key, new PendingDelete(key, oursId));
curBundle.delete(key);
}
}

TxBundle bundle;
synchronized (_notFlushedBundles) {
bundle = new TxBundle(_lastCommittedId.incrementAndGet());
_pendingBundles.addLast(bundle);
_notFlushedBundles.put(bundle.id(), bundle);
}

for (var action : writes) {
switch (action) {
case TxRecord.TxObjectRecordWrite<?> write -> {
Log.tracev("Flushing object {0}", write.key());
bundle.commit(new JDataVersionedWrapperImpl(write.data(), bundle.id()));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
Log.tracev("Deleting object {0}", deleted.key());
bundle.delete(deleted.key());
}
default -> {
throw new TxCommitException("Unexpected value: " + action.key());
}
}
}
while (true) {
var curPw = _pendingWrites.get();
var curPwMap = curPw.pendingWrites();
for (var e : ((TxBundle) bundle)._entries.values()) {
switch (e) {
case TxBundle.CommittedEntry c -> {
curPwMap = curPwMap.plus(c.key(), new PendingWrite(c.data, bundle.id()));
}
case TxBundle.DeletedEntry d -> {
curPwMap = curPwMap.plus(d.key(), new PendingDelete(d.key, bundle.id()));
}
default -> throw new IllegalStateException("Unexpected value: " + e);
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
bundle.id()
);

if (!_pendingWrites.compareAndSet(curPw, newCurPw))
continue;

((TxBundle) bundle).setReady();
if (_pendingBundles.peek() == bundle)
_pendingBundles.notify();
synchronized (_flushWaitSynchronizer) {
currentSize += ((TxBundle) bundle).size();
}

return bundle.id();
}
}
// Now, make the changes visible to new iterators
var newCurPw = new PendingWriteData(
curPwMap,
curPw.lastFlushedId(),
oursId
);

_pendingWrites.compareAndSet(curPw, newCurPw);

_pendingBundle = curBundle;
_newBundleCondition.signalAll();

_curSize += (curBundle.size() - oldSize);

if (shouldWake && _curSize < sizeLimit) {
_flushCondition.signal();
}

return oursId;
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
_pendingBundleLock.unlock();
}
}

public void asyncFence(long bundleId, Runnable fn) {
verifyReady();
if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!");
if (_lastWrittenId.get() >= bundleId) {
if (_lastFlushedId.get() >= bundleId) {
fn.run();
return;
}
synchronized (_notFlushedBundles) {
if (_lastWrittenId.get() >= bundleId) {
_pendingBundleLock.lock();
try {
if (_lastFlushedId.get() >= bundleId) {
fn.run();
return;
}
_notFlushedBundles.get(bundleId).addCallback(fn);
var pendingBundle = _pendingBundle;
if (pendingBundle == null) {
fn.run();
return;
}
pendingBundle.addCallback(fn);
} finally {
_pendingBundleLock.unlock();
}
}
@@ -341,10 +321,8 @@ public class WritebackObjectPersistentStore {
private final long txId = finalPw.lastCommittedId();

@Override
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("writeback-ps", start, key,
(tS, tK) -> new NavigableMapKvIterator<>(_pendingWrites, tS, tK),
(tS, tK) -> (CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (CloseableKvIterator<JObjectKey, ?>) _cache.getIterator(tS, tK));
public List<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getIterator(IteratorStart start, JObjectKey key) {
return ListUtils.prepend(new NavigableMapKvIterator<>(_pendingWrites, start, key), _cache.getIterator(start, key));
}

@Nonnull
@@ -381,46 +359,40 @@ public class WritebackObjectPersistentStore {
}
}

public interface VerboseReadResult {
}

private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
long lastFlushedId,
long lastCommittedId) {
}

private static class TxBundle {
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
private final HashMap<JObjectKey, BundleEntry> _entries = new HashMap<>();
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
private int _size = 0;
private long _txId;
private volatile boolean _ready = false;
private long _size = 0;
private boolean _wasCommitted = false;

ArrayList<Runnable> callbacks() {
return _callbacks;
}

private TxBundle(long txId) {
_txId = txId;
}

public long id() {
return _txId;
public void setId(long id) {
_txId = id;
}

public void setReady() {
_ready = true;
public long id() {
return _txId;

}

public void addCallback(Runnable callback) {
synchronized (_callbacks) {
if (_wasCommitted) throw new IllegalStateException();
_callbacks.add(callback);
}
_callbacks.add(callback);
}

public List<Runnable> setCommitted() {
synchronized (_callbacks) {
_wasCommitted = true;
return Collections.unmodifiableList(_callbacks);
}
public int size() {
return _size;
}

private void putEntry(BundleEntry entry) {
@@ -439,28 +411,7 @@ public class WritebackObjectPersistentStore {
putEntry(new DeletedEntry(obj));
}

public long size() {
return _size;
}

public void compress(TxBundle other) {
if (_txId >= other._txId)
throw new IllegalArgumentException("Compressing an older bundle into newer");

_txId = other._txId;

for (var entry : other._entries.values()) {
putEntry(entry);
}

synchronized (_callbacks) {
assert !_wasCommitted;
assert !other._wasCommitted;
_callbacks.addAll(other._callbacks);
}
}

private interface BundleEntry {
private sealed interface BundleEntry permits CommittedEntry, DeletedEntry {
JObjectKey key();

int size();
@@ -478,10 +429,4 @@ public class WritebackObjectPersistentStore {
}
}
}

public record VerboseReadResultPersisted(Optional<JDataVersionedWrapper> data) implements VerboseReadResult {
}

public record VerboseReadResultPending(PendingWriteEntry pending) implements VerboseReadResult {
}
}
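The hunks above replace the synchronized _pendingBundles queue with a single pending bundle guarded by a ReentrantLock and two conditions: _newBundleCondition wakes the writeback thread when a bundle is published, and _flushCondition gives committers backpressure once _curSize exceeds the limit. Below is a minimal sketch of that handoff pattern, under the assumption that one merged slot replaces the queue; SlotWriteback and its members are illustrative names, not the repository's classes, and unlike the real code the size counter here is drained when the slot is taken rather than after the flush completes.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

class SlotWriteback {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition newWork = lock.newCondition();    // signalled when the slot fills
    private final Condition belowLimit = lock.newCondition(); // signalled when size drains
    private final long sizeLimit = 16 * 1024 * 1024;
    private long curSize = 0;
    private StringBuilder pending = null; // stand-in for the pending bundle

    long commit(String data) throws InterruptedException {
        lock.lock();
        try {
            while (curSize > sizeLimit)
                belowLimit.await();        // backpressure: wait for the writeback thread
            if (pending == null)
                pending = new StringBuilder();
            pending.append(data);          // merge into the single pending slot
            curSize += data.length();
            newWork.signalAll();           // wake the writeback thread
            return curSize;
        } finally {
            lock.unlock();
        }
    }

    void writebackLoop() throws InterruptedException {
        while (!Thread.interrupted()) {
            StringBuilder bundle;
            lock.lock();
            try {
                while (pending == null)
                    newWork.await();       // wait for published work
                bundle = pending;
                pending = null;            // take the whole slot
                curSize = 0;
                belowLimit.signalAll();    // blocked committers may proceed
            } finally {
                lock.unlock();
            }
            flush(bundle);                 // do the actual I/O outside the lock
        }
    }

    private void flush(StringBuilder bundle) {
        // stand-in for cachedStore.commitTx(...): persist outside the lock
    }
}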
@@ -43,4 +43,9 @@ public class CurrentTransaction implements Transaction {
public <T extends JData> void put(JData obj) {
transactionManager.current().put(obj);
}

@Override
public <T extends JData> void putNew(JData obj) {
transactionManager.current().putNew(obj);
}
}
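The difference between put and the new putNew shows up at commit time: putNew records the key in the transaction's _knownNew set (added further below), so the commit path can skip the snapshot read and the per-key lock for objects that cannot exist yet. A usage sketch with stand-in types; MiniTx and MiniData are hypothetical, not the repository's Transaction/JData interfaces.

class PutNewSketch {
    interface MiniData {
        String key();
    }

    interface MiniTx {
        void put(MiniData obj);    // may already exist: tracked via the read set, locked on commit
        void putNew(MiniData obj); // declared brand new: skips the snapshot read and the commit lock
    }

    static void example(MiniTx tx, MiniData created, MiniData modified) {
        tx.putNew(created);
        tx.put(modified);
    }
}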
@@ -12,17 +12,17 @@ import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.enterprise.inject.Instance;
import jakarta.enterprise.inject.spi.CDI;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

@ApplicationScoped
public class JObjectManager {
private final List<PreCommitTxHook> _preCommitTxHooks;
private static final List<PreCommitTxHook> _preCommitTxHooks;
@Inject
WritebackObjectPersistentStore writebackObjectPersistentStore;
@Inject
@@ -30,8 +30,12 @@ public class JObjectManager {
@Inject
LockManager lockManager;
private boolean _ready = false;

static {
_preCommitTxHooks = List.copyOf(CDI.current().select(PreCommitTxHook.class).stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList());
}

JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
_preCommitTxHooks = List.copyOf(preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList());
Log.debugv("Pre-commit hooks: {0}", String.join("->", _preCommitTxHooks.stream().map(Objects::toString).toList()));
}
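The hunk above swaps constructor injection of Instance<PreCommitTxHook> for a static initializer that pulls the hooks out of the CDI container once and sorts them by priority. A generic sketch of that lookup pattern follows; HookRegistry and Hook are illustrative names, and the pattern assumes the CDI container is already booted when the class is first initialized.

import jakarta.enterprise.inject.spi.CDI;

import java.util.Comparator;
import java.util.List;

class HookRegistry {
    interface Hook {
        int getPriority();
    }

    // Resolved once per class load and sorted by priority, mirroring the
    // static initializer above.
    static final List<Hook> HOOKS = List.copyOf(
            CDI.current().select(Hook.class).stream()
                    .sorted(Comparator.comparingInt(Hook::getPriority))
                    .toList());
}

The trade-off of the static form is that the hook list is fixed per class load, so a test can no longer substitute hooks through the injection point.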
@@ -54,97 +58,92 @@ public class JObjectManager {
verifyReady();
var writes = new HashMap<JObjectKey, TxRecord.TxObjectRecord<?>>();
Snapshot<JObjectKey, JDataVersionedWrapper> commitSnapshot = null;
Map<JObjectKey, TransactionObject<?>> readSet = null;
Map<JObjectKey, Optional<JDataVersionedWrapper>> readSet = null;
Collection<AutoCloseableNoThrow> toUnlock = null;

try {
try {
long pendingCount = 0;
List<CommitHookIterationData> hookIterationData;
{
CommitHookIterationData[] hookIterationDataArray = new CommitHookIterationData[_preCommitTxHooks.size()];
for (int i = 0; i < _preCommitTxHooks.size(); i++) {
var hook = _preCommitTxHooks.get(i);
hookIterationDataArray[i] = new CommitHookIterationData(hook, new HashMap<>(), new HashMap<>());
}
hookIterationData = List.of(hookIterationDataArray);
long pendingCount = 0;
List<CommitHookIterationData> hookIterationData;
{
CommitHookIterationData[] hookIterationDataArray = new CommitHookIterationData[_preCommitTxHooks.size()];
for (int i = 0; i < _preCommitTxHooks.size(); i++) {
var hook = _preCommitTxHooks.get(i);
hookIterationDataArray[i] = new CommitHookIterationData(hook, new HashMap<>(), new HashMap<>());
}
hookIterationData = List.of(hookIterationDataArray);
}

for (var n : tx.drainNewWrites()) {
for (var hookPut : hookIterationData) {
hookPut.pendingWrites().put(n.key(), n);
pendingCount++;
}
writes.put(n.key(), n);
for (var n : tx.drainNewWrites()) {
var key = n.key();
for (var hookPut : hookIterationData) {
hookPut.pendingWrites().put(key, n);
pendingCount++;
}
writes.put(key, n);
}
// Run hooks for all objects
// Every hook should see every change made to every object, yet the object's evolution
// should be consistent from the view point of each individual hook
// For example, when a hook makes changes to an object, and another hook changes the object before/after it
// on the next iteration, the first hook should receive the version of the object it had created
// as the "old" version, and the new version with all the changes after it.
while (pendingCount > 0) {
for (var hookId : hookIterationData) {
var hook = hookId.hook();
var lastCurHookSeen = hookId.lastWrites();
Function<JObjectKey, JData> getPrev =
key -> switch (lastCurHookSeen.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> write.data();
case TxRecord.TxObjectRecordDeleted deleted -> null;
case null -> tx.getFromSource(JData.class, key).orElse(null);
default -> {
throw new TxCommitException("Unexpected value: " + writes.get(key));
}
};

var curIteration = hookId.pendingWrites();
// Run hooks for all objects
// Every hook should see every change made to every object, yet the object's evolution
// should be consistent from the view point of each individual hook
// For example, when a hook makes changes to an object, and another hook changes the object before/after it
// on the next iteration, the first hook should receive the version of the object it had created
// as the "old" version, and the new version with all the changes after it.
while (pendingCount > 0) {
for (var hookId : hookIterationData) {
var hook = hookId.hook();
var lastCurHookSeen = hookId.lastWrites();
Function<JObjectKey, JData> getPrev =
key -> switch (lastCurHookSeen.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> write.data();
case TxRecord.TxObjectRecordDeleted deleted -> null;
case null -> tx.getFromSource(JData.class, key).orElse(null);
default -> {
throw new TxCommitException("Unexpected value: " + writes.get(key));
}
};

var curIteration = hookId.pendingWrites();

// Log.trace("Commit iteration with " + curIteration.size() + " records for hook " + hook.getClass());

for (var entry : curIteration.entrySet()) {
for (var entry : curIteration.entrySet()) {
var key = entry.getKey();
// Log.trace("Running pre-commit hook " + hook.getClass() + " for" + entry.getKey());
var oldObj = getPrev.apply(entry.getKey());
lastCurHookSeen.put(entry.getKey(), entry.getValue());
switch (entry.getValue()) {
case TxRecord.TxObjectRecordWrite<?> write -> {
if (oldObj == null) {
hook.onCreate(write.key(), write.data());
} else {
hook.onChange(write.key(), oldObj, write.data());
}
var oldObj = getPrev.apply(key);
lastCurHookSeen.put(key, entry.getValue());
switch (entry.getValue()) {
case TxRecord.TxObjectRecordWrite<?> write -> {
if (oldObj == null) {
hook.onCreate(key, write.data());
} else {
hook.onChange(key, oldObj, write.data());
}
case TxRecord.TxObjectRecordDeleted deleted -> {
hook.onDelete(deleted.key(), oldObj);
}
default -> throw new TxCommitException("Unexpected value: " + entry);
}
}

pendingCount -= curIteration.size();
curIteration.clear();

for (var n : tx.drainNewWrites()) {
for (var hookPut : hookIterationData) {
if (hookPut == hookId) {
lastCurHookSeen.put(n.key(), n);
continue;
}
var before = hookPut.pendingWrites().put(n.key(), n);
if (before == null)
pendingCount++;
case TxRecord.TxObjectRecordDeleted deleted -> {
hook.onDelete(key, oldObj);
}
writes.put(n.key(), n);
default -> throw new TxCommitException("Unexpected value: " + entry);
}
}
} catch (Throwable e) {
for (var read : tx.reads().entrySet()) {
if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();

pendingCount -= curIteration.size();
curIteration.clear();

for (var n : tx.drainNewWrites()) {
var key = n.key();
for (var hookPut : hookIterationData) {
if (hookPut == hookId) {
lastCurHookSeen.put(key, n);
continue;
}
var before = hookPut.pendingWrites().put(key, n);
if (before == null)
pendingCount++;
}
writes.put(key, n);
}
}
throw e;
}
readSet = tx.reads();
@@ -153,17 +152,16 @@ public class JObjectManager {
toUnlock = new ArrayList<>(readSet.size() + writes.size());
ArrayList<JObjectKey> toLock = new ArrayList<>(readSet.size() + writes.size());
for (var read : readSet.entrySet()) {
if (read.getValue() instanceof TransactionObjectLocked<?> locked) {
toUnlock.add(locked.lock());
} else {
toLock.add(read.getKey());
}
toLock.add(read.getKey());
}
for (var write : writes.entrySet()) {
toLock.add(write.getKey());
for (var write : writes.keySet()) {
if (!readSet.containsKey(write))
toLock.add(write);
}
Collections.sort(toLock);
toLock.sort(null);
for (var key : toLock) {
if (tx.knownNew().contains(key))
continue;
var lock = lockManager.lockObject(key);
toUnlock.add(lock);
}
@@ -175,10 +173,7 @@ public class JObjectManager {
long version = 0L;

for (var read : readSet.values()) {
version = Math.max(version, read.data().map(JDataVersionedWrapper::version).orElse(0L));
if (read instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
}
version = Math.max(version, read.map(JDataVersionedWrapper::version).orElse(0L));
}

long finalVersion = version;
@@ -186,14 +181,16 @@ public class JObjectManager {
writebackObjectPersistentStore.asyncFence(finalVersion, r);
};

var onCommit = tx.getOnCommit();
var onFlush = tx.getOnFlush();

return Pair.of(
Stream.concat(
tx.getOnCommit().stream(),
Stream.<Runnable>of(() -> {
for (var f : tx.getOnFlush())
fenceFn.accept(f);
})
).toList(),
List.of(() -> {
for (var f : onCommit)
f.run();
for (var f : onFlush)
fenceFn.accept(f);
}),
new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
@@ -209,13 +206,13 @@ public class JObjectManager {
for (var read : readSet.entrySet()) {
var current = commitSnapshot.readObject(read.getKey());

if (current.isEmpty() != read.getValue().data().isEmpty()) {
if (current.isEmpty() != read.getValue().isEmpty()) {
Log.tracev("Checking read dependency {0} - not found", read.getKey());
throw new TxCommitException("Serialization hazard: " + current.isEmpty() + " vs " + read.getValue().data().isEmpty());
throw new TxCommitException("Serialization hazard: " + current.isEmpty() + " vs " + read.getValue().isEmpty());
}

if (current.isEmpty()) {
// TODO: Every write gets a dependency due to hooks
// Every write gets a dependency due to hooks
continue;
// assert false;
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().value().isEmpty());
@@ -240,7 +237,7 @@ public class JObjectManager {
}

return Pair.of(
List.copyOf(tx.getOnCommit()),
tx.getOnCommit(),
new TransactionHandle() {
@Override
public void onFlush(Runnable runnable) {
@@ -263,11 +260,6 @@ public class JObjectManager {

public void rollback(TransactionPrivate tx) {
verifyReady();
tx.reads().forEach((key, value) -> {
if (value instanceof TransactionObjectLocked<?> locked) {
locked.lock().close();
}
});
tx.close();
}
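The commit path above iterates the pre-commit hooks to a fixpoint: each round drains the writes produced so far, shows them to every hook, and re-queues whatever the hooks themselves wrote, until pendingCount reaches zero. The sketch below shows that shape in a deliberately simplified form, with one shared round map instead of the per-hook pending maps the real code keeps so that each hook sees its own writes as the "old" version; all names are illustrative.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class HookFixpoint {
    interface Hook {
        // Returns any additional writes this hook produced for the given change.
        Map<String, String> onWrite(String key, String value);
    }

    static void run(List<Hook> hooks, Map<String, String> initialWrites) {
        Map<String, String> pending = new HashMap<>(initialWrites);
        while (!pending.isEmpty()) { // fixpoint: stop once a round produces nothing new
            Map<String, String> round = pending;
            pending = new HashMap<>();
            for (Hook hook : hooks)
                for (var e : round.entrySet())
                    // writes made by hooks feed the next round
                    pending.putAll(hook.onWrite(e.getKey(), e.getValue()));
        }
    }
}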
@@ -14,6 +14,7 @@ public interface Transaction extends TransactionHandle {
<T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy);

<T extends JData> void put(JData obj);
<T extends JData> void putNew(JData obj);

void delete(JObjectKey key);

@@ -6,6 +6,7 @@ import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.*;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
import com.usatiuk.utils.ListUtils;
import io.quarkus.logging.Log;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
@@ -55,12 +56,15 @@ public class TransactionFactoryImpl implements TransactionFactory {
}

private class TransactionImpl implements TransactionPrivate {
private final Map<JObjectKey, TransactionObject<?>> _readSet = new HashMap<>();
private final Map<JObjectKey, Optional<JDataVersionedWrapper>> _readSet = new HashMap<>();
private final NavigableMap<JObjectKey, TxRecord.TxObjectRecord<?>> _writes = new TreeMap<>();
private final List<Runnable> _onCommit = new ArrayList<>();
private final List<Runnable> _onFlush = new ArrayList<>();
private final List<Runnable> _onCommit = new LinkedList<>();
private final List<Runnable> _onFlush = new LinkedList<>();
private final HashSet<JObjectKey> _knownNew = new HashSet<>();
private final Snapshot<JObjectKey, JDataVersionedWrapper> _snapshot;
private boolean _closed = false;

private boolean _writeTrack = false;
private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();

private TransactionImpl() {
@@ -94,103 +98,101 @@ public class TransactionFactoryImpl implements TransactionFactory {

@Override
public <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key) {
return _readSet.computeIfAbsent(key, k -> {
var read = _snapshot.readObject(k);
return new TransactionObjectNoLock<>(read);
})
.data()
.map(w -> type.cast(w.data()));
}

public <T extends JData> Optional<T> getWriteLockedFromSource(Class<T> type, JObjectKey key) {
var got = _readSet.get(key);

if (got == null) {
var lock = lockManager.lockObject(key);
try {
var read = _snapshot.readObject(key);
_readSet.put(key, new TransactionObjectLocked<>(read, lock));
return read.map(JDataVersionedWrapper::data).map(type::cast);
} catch (Exception e) {
lock.close();
throw e;
}
if (_knownNew.contains(key)) {
return Optional.empty();
}

return got.data().map(JDataVersionedWrapper::data).map(type::cast);
return _readSet.computeIfAbsent(key, _snapshot::readObject)
.map(JDataVersionedWrapper::data)
.map(type::cast);
}

@Override
public <T extends JData> Optional<T> get(Class<T> type, JObjectKey key, LockingStrategy strategy) {
switch (_writes.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> {
return Optional.of(type.cast(write.data()));
}
case TxRecord.TxObjectRecordDeleted deleted -> {
return Optional.empty();
}
case null, default -> {
}
}

if (neverLock)
return getFromSource(type, key);

return switch (strategy) {
case OPTIMISTIC -> getFromSource(type, key);
case WRITE -> getWriteLockedFromSource(type, key);
return switch (_writes.get(key)) {
case TxRecord.TxObjectRecordWrite<?> write -> Optional.of(type.cast(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> Optional.empty();
case null -> getFromSource(type, key);
};
}

@Override
public void delete(JObjectKey key) {
var got = _writes.get(key);
if (got != null) {
if (got instanceof TxRecord.TxObjectRecordDeleted) {
return;
}
var record = new TxRecord.TxObjectRecordDeleted(key);
if (_writes.put(key, record) instanceof TxRecord.TxObjectRecordDeleted) {
return;
}

_writes.put(key, new TxRecord.TxObjectRecordDeleted(key));
_newWrites.put(key, new TxRecord.TxObjectRecordDeleted(key));
if (_writeTrack)
_newWrites.put(key, record);
}
@Override
public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
return new ReadTrackingIterator(TombstoneMergingKvIterator.<JObjectKey, ReadTrackingInternalCrap>of("tx", start, key,
(tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new DataWrapper<>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> new TombstoneImpl<>();
case null, default -> null;
}),
(tS, tK) -> new MappingKvIterator<>(_snapshot.getIterator(tS, tK),
d -> new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapSource(d)))));
return new ReadTrackingIterator(new TombstoneSkippingIterator<JObjectKey, ReadTrackingInternalCrap>(start, key,
ListUtils.prependAndMap(
new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, start, key),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted ->
new TombstoneImpl<ReadTrackingInternalCrap>();
case null, default -> null;
}),
_snapshot.getIterator(start, key),
itin -> new MappingKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>, MaybeTombstone<ReadTrackingInternalCrap>>(itin,
d -> switch (d) {
case Data<JDataVersionedWrapper> w ->
new DataWrapper<>(new ReadTrackingInternalCrapSource(w.value()));
case Tombstone<JDataVersionedWrapper> t -> new TombstoneImpl<>();
case null, default -> null;
}))));
}

@Override
public void put(JData obj) {
var read = _readSet.get(obj.key());
if (read != null && (read.data().map(JDataVersionedWrapper::data).orElse(null) == obj)) {
var key = obj.key();
var read = _readSet.get(key);
if (read != null && (read.map(JDataVersionedWrapper::data).orElse(null) == obj)) {
return;
}

_writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
_newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
var record = new TxRecord.TxObjectRecordWrite<>(obj);
_writes.put(key, record);
if (_writeTrack)
_newWrites.put(key, record);
}

@Override
public void putNew(JData obj) {
var key = obj.key();
_knownNew.add(key);

var record = new TxRecord.TxObjectRecordWrite<>(obj);
_writes.put(key, record);
if (_writeTrack)
_newWrites.put(key, record);
}

@Override
public Collection<TxRecord.TxObjectRecord<?>> drainNewWrites() {
if (!_writeTrack) {
_writeTrack = true;
return Collections.unmodifiableCollection(_writes.values());
}
var ret = _newWrites;
_newWrites = new HashMap<>();
return ret.values();
}

@Override
public Map<JObjectKey, TransactionObject<?>> reads() {
return Collections.unmodifiableMap(_readSet);
public Map<JObjectKey, Optional<JDataVersionedWrapper>> reads() {
return _readSet;
}

@Override
public Set<JObjectKey> knownNew() {
return _knownNew;
}

@Override
@@ -226,7 +228,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
public Pair<JObjectKey, JData> prev() {
var got = _backing.prev();
if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
_readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(wrapped)));
_readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
}
return Pair.of(got.getKey(), got.getValue().obj());
}
@@ -255,7 +257,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
public Pair<JObjectKey, JData> next() {
var got = _backing.next();
if (got.getValue() instanceof ReadTrackingInternalCrapSource(JDataVersionedWrapper wrapped)) {
_readSet.putIfAbsent(got.getKey(), new TransactionObjectNoLock<>(Optional.of(wrapped)));
_readSet.putIfAbsent(got.getKey(), Optional.of(wrapped));
}
return Pair.of(got.getKey(), got.getValue().obj());
}
@@ -12,8 +12,8 @@ public interface TransactionManager {

void rollback();

default <T> T runTries(Supplier<T> supplier, int tries) {
if (current() != null) {
default <T> T runTries(Supplier<T> supplier, int tries, boolean nest) {
if (!nest && current() != null) {
return supplier.get();
}

@@ -41,8 +41,8 @@ public interface TransactionManager {
}
}

default TransactionHandle runTries(VoidFn fn, int tries) {
if (current() != null) {
default TransactionHandle runTries(VoidFn fn, int tries, boolean nest) {
if (!nest && current() != null) {
fn.apply();
return new TransactionHandle() {
@Override
@@ -74,23 +74,38 @@ public interface TransactionManager {
throw e;
}
}
}

default <T> T runTries(Supplier<T> supplier, int tries) {
return runTries(supplier, tries, false);
}

default TransactionHandle runTries(VoidFn fn, int tries) {
return runTries(fn, tries, false);
}

default TransactionHandle run(VoidFn fn, boolean nest) {
return runTries(fn, 10, nest);
}

default <T> T run(Supplier<T> supplier, boolean nest) {
return runTries(supplier, 10, nest);
}

default TransactionHandle run(VoidFn fn) {
return runTries(fn, 10);
return run(fn, false);
}

default <T> T run(Supplier<T> supplier) {
return runTries(supplier, 10);
return run(supplier, false);
}

default void executeTx(VoidFn fn) {
run(fn);
run(fn, false);
}

default <T> T executeTx(Supplier<T> supplier) {
return run(supplier);
return run(supplier, false);
}

Transaction current();
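runTries and run gain a nest flag here: with nest=false they keep the old behavior of joining the transaction already active on the thread, while nest=true forces a fresh transaction even inside another one, backed by the transaction stack introduced below. A stand-alone usage sketch; Txm is a stub standing in for the real TransactionManager, only so the example compiles on its own.

class NestExample {
    interface Txm {
        void run(Runnable fn, boolean nest);

        default void run(Runnable fn) {
            run(fn, false); // old single-argument behavior: join if already in a tx
        }
    }

    static void example(Txm txm) {
        txm.run(() -> { /* joins the caller's transaction when one is active */ });
        txm.run(() -> { /* always runs and commits in its own nested transaction */ }, true);
    }
}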
@@ -1,46 +1,48 @@
package com.usatiuk.objects.transaction;

import io.quarkus.logging.Log;
import jakarta.annotation.Nullable;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Collection;
import java.util.Stack;

@Singleton
public class TransactionManagerImpl implements TransactionManager {
private static final ThreadLocal<TransactionPrivate> _currentTransaction = new ThreadLocal<>();
private static final ThreadLocal<Stack<TransactionPrivate>> _currentTransaction = ThreadLocal.withInitial(Stack::new);
@Inject
JObjectManager jObjectManager;

@Override
public void begin() {
if (_currentTransaction.get() != null) {
throw new IllegalStateException("Transaction already started");
}

Log.trace("Starting transaction");
var tx = jObjectManager.createTransaction();
_currentTransaction.set(tx);
_currentTransaction.get().push(tx);
}

@Override
public TransactionHandle commit() {
if (_currentTransaction.get() == null) {
var stack = _currentTransaction.get();
if (stack.empty()) {
throw new IllegalStateException("No transaction started");
}
var peeked = stack.peek();

Log.trace("Committing transaction");

Pair<Collection<Runnable>, TransactionHandle> ret;
try {
ret = jObjectManager.commit(_currentTransaction.get());
ret = jObjectManager.commit(peeked);
} catch (Throwable e) {
Log.trace("Transaction commit failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
peeked.close();
stack.pop();
if (stack.empty())
_currentTransaction.remove();
}

for (var r : ret.getLeft()) {
@@ -55,24 +57,33 @@ public class TransactionManagerImpl implements TransactionManager {

@Override
public void rollback() {
if (_currentTransaction.get() == null) {
var stack = _currentTransaction.get();
if (stack.empty()) {
throw new IllegalStateException("No transaction started");
}
var peeked = stack.peek();

try {
jObjectManager.rollback(_currentTransaction.get());
jObjectManager.rollback(peeked);
} catch (Throwable e) {
Log.error("Transaction rollback failed", e);
throw e;
} finally {
_currentTransaction.get().close();
_currentTransaction.remove();
peeked.close();
stack.pop();
if (stack.empty())
_currentTransaction.remove();
}
}

@Override
@Nullable
public Transaction current() {
return _currentTransaction.get();
var stack = _currentTransaction.get();
if (stack.empty()) {
return null;
}
return stack.peek();
}
}
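Nesting is what forces the ThreadLocal change above: the single per-thread transaction slot becomes a ThreadLocal<Stack<TransactionPrivate>>, begin() pushes, commit() and rollback() pop, and the ThreadLocal entry is removed once the stack drains. The same pattern in isolation, as a generic sketch; an ArrayDeque is used here where the diff uses java.util.Stack, and the names are illustrative.

import java.util.ArrayDeque;
import java.util.Deque;

class NestedContext {
    private static final ThreadLocal<Deque<Object>> CURRENT =
            ThreadLocal.withInitial(ArrayDeque::new);

    static void begin(Object ctx) {
        CURRENT.get().push(ctx);
    }

    static Object current() {
        var stack = CURRENT.get();
        return stack.isEmpty() ? null : stack.peek(); // null when nothing is active
    }

    static void end() {
        var stack = CURRENT.get();
        stack.pop(); // throws NoSuchElementException if unbalanced
        if (stack.isEmpty())
            CURRENT.remove(); // avoid leaking the entry across pooled threads
    }
}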
@@ -1,10 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;

import java.util.Optional;

public interface TransactionObject<T extends JData> {
Optional<JDataVersionedWrapper> data();
}
@@ -1,12 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.utils.AutoCloseableNoThrow;

import java.util.Optional;

public record TransactionObjectLocked<T extends JData>
(Optional<JDataVersionedWrapper> data, AutoCloseableNoThrow lock)
implements TransactionObject<T> {
}
@@ -1,11 +0,0 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;

import java.util.Optional;

public record TransactionObjectNoLock<T extends JData>
(Optional<JDataVersionedWrapper> data)
implements TransactionObject<T> {
}
@@ -9,12 +9,15 @@ import com.usatiuk.utils.AutoCloseableNoThrow;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

// The transaction interface actually used by user code to retrieve objects
public interface TransactionPrivate extends Transaction, TransactionHandlePrivate, AutoCloseableNoThrow {
Collection<TxRecord.TxObjectRecord<?>> drainNewWrites();

Map<JObjectKey, TransactionObject<?>> reads();
Map<JObjectKey, Optional<JDataVersionedWrapper>> reads();

Set<JObjectKey> knownNew();

<T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key);

@@ -4,7 +4,7 @@ import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;

public class TxRecord {
public interface TxObjectRecord<T> {
public sealed interface TxObjectRecord<T> permits TxObjectRecordWrite, TxObjectRecordDeleted {
JObjectKey key();
}

@@ -1,10 +1,12 @@
dhfs.objects.persistence=lmdb
dhfs.objects.writeback.limit=134217728
dhfs.objects.lru.limit=134217728
dhfs.objects.writeback.limit=16777216
dhfs.objects.lru.limit=67108864
dhfs.objects.lru.print-stats=false
dhfs.objects.lock_timeout_secs=15
dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs
dhfs.objects.persistence.snapshot-extra-checks=false
dhfs.objects.transaction.never-lock=true
dhfs.objects.last-seen.update=60
dhfs.objects.last-seen.timeout=43200
quarkus.log.category."com.usatiuk.objects.iterators".level=INFO
quarkus.log.category."com.usatiuk.objects.iterators".min-level=INFO

@@ -1,5 +1,6 @@
package com.usatiuk.objects.iterators;

import jnr.ffi.annotations.In;
import net.jqwik.api.*;
import net.jqwik.api.state.Action;
import net.jqwik.api.state.ActionChain;
@@ -9,6 +10,7 @@ import org.junit.jupiter.api.Assertions;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.BiConsumer;

public class MergingKvIteratorPbtTest {
@Property
@@ -54,8 +56,8 @@ public class MergingKvIteratorPbtTest {
}
}
mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
mergingIterator = new MergingKvIterator<>("test", startType, startKey, pairs.stream().<IterProdFn<Integer, Integer>>map(
list -> (IteratorStart start, Integer key) -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), start, key)
mergingIterator = new MergingKvIterator<>(startType, startKey, pairs.stream().<CloseableKvIterator<Integer, Integer>>map(
list -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), startType, startKey)
).toList());
}
@@ -1,348 +0,0 @@
package com.usatiuk.objects.iterators;

import com.usatiuk.objects.Just;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

public class MergingKvIteratorTest {

@Test
public void testTestIterator() {
var list = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6));
var iterator = new SimpleIteratorWrapper<>(list.iterator());
var realIterator = list.iterator();
while (realIterator.hasNext()) {
Assertions.assertTrue(iterator.hasNext());
Assertions.assertEquals(realIterator.next(), iterator.next());
}
Assertions.assertFalse(iterator.hasNext());

var emptyList = List.<Pair<Integer, Integer>>of();
var emptyIterator = new SimpleIteratorWrapper<>(emptyList.iterator());
Assertions.assertFalse(emptyIterator.hasNext());
}

@Test
public void testSimple() {
var source1 = List.of(Pair.of(1, 2), Pair.of(3, 4), Pair.of(5, 6)).iterator();
var source2 = List.of(Pair.of(2, 3), Pair.of(4, 5), Pair.of(6, 7)).iterator();
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1), (a, b) -> new SimpleIteratorWrapper<>(source2));
var expected = List.of(Pair.of(1, 2), Pair.of(2, 3), Pair.of(3, 4), Pair.of(4, 5), Pair.of(5, 6), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
}

@Test
public void testPriority() {
var source1 = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
var expected = List.of(Pair.of(1, 2), Pair.of(2, 4), Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}

@Test
public void testPriority2() {
var source1 = List.of(Pair.of(2, 4), Pair.of(5, 6));
var source2 = List.of(Pair.of(1, 3), Pair.of(2, 5));
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()));
var expected = List.of(Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.GE, 0, (a, b) -> new SimpleIteratorWrapper<>(source2.iterator()), (a, b) -> new SimpleIteratorWrapper<>(source1.iterator()));
var expected2 = List.of(Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}

@Test
public void testPriorityLe() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
Just.checkIterator(mergingIterator.reversed(), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator.reversed().hasNext());
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6));
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(5, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
Just.checkIterator(mergingIterator2.reversed(), Pair.of(5, 6), Pair.of(2, 5), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator2.reversed().hasNext());
Just.checkIterator(mergingIterator2, Pair.of(1, 3), Pair.of(2, 5), Pair.of(5, 6));
Assertions.assertFalse(mergingIterator2.hasNext());

var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
}
@Test
public void testPriorityLe2() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
}

@Test
public void testPriorityLe3() {
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(6, 8);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(5, 6), Pair.of(6, 8));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());
Just.checkIterator(mergingIterator.reversed(), Pair.of(6, 8), Pair.of(5, 6), Pair.of(2, 4), Pair.of(1, 3));
Assertions.assertFalse(mergingIterator.reversed().hasNext());
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(2, 4), Pair.of(5, 6), Pair.of(6, 8));
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(5, 6), Pair.of(6, 8));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());

var mergingIterator3 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
Assertions.assertEquals(2, mergingIterator3.peekPrevKey());
Assertions.assertTrue(mergingIterator3.hasPrev());
Assertions.assertTrue(mergingIterator3.hasNext());
Assertions.assertEquals(5, mergingIterator3.peekNextKey());
}

@Test
public void testPriorityLe4() {
var source1 = TreePMap.<Integer, Integer>empty().plus(6, 7);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}

@Test
public void testPriorityLe5() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 2).plus(6, 7);
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(3, 4), Pair.of(6, 7));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}

@Test
public void testPriorityLe6() {
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5).plus(3, 4);
var source2 = TreePMap.<Integer, Integer>empty().plus(4, 6);
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
var expected = List.of(Pair.of(4, 6));
for (var pair : expected) {
Assertions.assertTrue(mergingIterator.hasNext());
Assertions.assertEquals(pair, mergingIterator.next());
}
Assertions.assertFalse(mergingIterator.hasNext());

var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
var expected2 = List.of(Pair.of(4, 6));
for (var pair : expected2) {
Assertions.assertTrue(mergingIterator2.hasNext());
Assertions.assertEquals(pair, mergingIterator2.next());
}
Assertions.assertFalse(mergingIterator2.hasNext());
}
@Test
|
||||
public void testPriorityLe7() {
|
||||
var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
|
||||
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 4).plus(3, 5).plus(4, 6);
|
||||
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
|
||||
var expected = List.of(Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));
|
||||
for (var pair : expected) {
|
||||
Assertions.assertTrue(mergingIterator.hasNext());
|
||||
Assertions.assertEquals(pair, mergingIterator.next());
|
||||
}
|
||||
Assertions.assertFalse(mergingIterator.hasNext());
|
||||
Just.checkIterator(mergingIterator.reversed(), Pair.of(4, 6), Pair.of(3, 5), Pair.of(1, 3));
|
||||
Just.checkIterator(mergingIterator, Pair.of(1, 3), Pair.of(3, 5), Pair.of(4, 6));
|
||||
|
||||
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LE, 2, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
|
||||
var expected2 = List.of(Pair.of(1, 4), Pair.of(3, 5), Pair.of(4, 6));
|
||||
for (var pair : expected2) {
|
||||
Assertions.assertTrue(mergingIterator2.hasNext());
|
||||
Assertions.assertEquals(pair, mergingIterator2.next());
|
||||
}
|
||||
Assertions.assertFalse(mergingIterator2.hasNext());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPriorityLt() {
|
||||
var source1 = TreePMap.<Integer, Integer>empty().plus(2, 4).plus(5, 6);
|
||||
var source2 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(2, 5);
|
||||
var mergingIterator = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK));
|
||||
var expected = List.of(Pair.of(2, 4), Pair.of(5, 6));
|
||||
for (var pair : expected) {
|
||||
Assertions.assertTrue(mergingIterator.hasNext());
|
||||
Assertions.assertEquals(pair, mergingIterator.next());
|
||||
}
|
||||
Assertions.assertFalse(mergingIterator.hasNext());
|
||||
|
||||
var mergingIterator2 = new MergingKvIterator<>("test", IteratorStart.LT, 5, (mS, mK) -> new NavigableMapKvIterator<>(source2, mS, mK), (mS, mK) -> new NavigableMapKvIterator<>(source1, mS, mK));
|
||||
var expected2 = List.of(Pair.of(2, 5), Pair.of(5, 6));
|
||||
for (var pair : expected2) {
|
||||
Assertions.assertTrue(mergingIterator2.hasNext());
|
||||
Assertions.assertEquals(pair, mergingIterator2.next());
|
||||
}
|
||||
Assertions.assertFalse(mergingIterator2.hasNext());
|
||||
}
|
||||
|
||||
private class SimpleIteratorWrapper<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
|
||||
private final Iterator<Pair<K, V>> _iterator;
|
||||
private Pair<K, V> _next;
|
||||
|
||||
public SimpleIteratorWrapper(Iterator<Pair<K, V>> iterator) {
|
||||
_iterator = iterator;
|
||||
fillNext();
|
||||
}
|
||||
|
||||
private void fillNext() {
|
||||
while (_iterator.hasNext() && _next == null) {
|
||||
_next = _iterator.next();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public K peekNextKey() {
|
||||
if (_next == null) {
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
return _next.getKey();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void skip() {
|
||||
if (_next == null) {
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
_next = null;
|
||||
fillNext();
|
||||
}
|
||||
|
||||
@Override
|
||||
public K peekPrevKey() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<K, V> prev() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasPrev() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void skipPrev() {
|
||||
throw new UnsupportedOperationException();
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return _next != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<K, V> next() {
|
||||
if (_next == null) {
|
||||
throw new NoSuchElementException("No more elements");
|
||||
}
|
||||
var ret = _next;
|
||||
_next = null;
|
||||
fillNext();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
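For reference, the invariant these priority tests pin down is first-iterator-wins on duplicate keys. A minimal oracle sketch of that rule over plain NavigableMaps (an illustration, not the production MergingKvIterator):

    // Hedged sketch: earlier sources win on key collisions, matching the
    // expected lists asserted above.
    static <K extends Comparable<K>, V> NavigableMap<K, V> mergeFirstWins(List<NavigableMap<K, V>> sources) {
        NavigableMap<K, V> merged = new TreeMap<>();
        for (var source : sources) {
            for (var e : source.entrySet()) {
                merged.putIfAbsent(e.getKey(), e.getValue()); // first source takes priority
            }
        }
        return merged;
    }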
@@ -1,161 +0,0 @@
package com.usatiuk.objects.iterators;

import com.usatiuk.objects.Just;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.pcollections.TreePMap;

import java.util.List;

public class PredicateKvIteratorTest {

    @Test
    public void simpleTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.GT, 1),
                IteratorStart.GE, 1, v -> (v % 2 == 0) ? v : null);
        var expected = List.of(Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
    }

    @Test
    public void ltTest() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        var expected = List.of(Pair.of(4, 6));
        for (var pair : expected) {
            Assertions.assertTrue(pit.hasNext());
            Assertions.assertEquals(pair, pit.next());
        }
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest2() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 1),
                IteratorStart.LT, 1, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 2),
                IteratorStart.LT, 2, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 4),
                IteratorStart.LE, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6));
        Assertions.assertFalse(pit.hasNext());
    }

    @Test
    public void ltTest3() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 7).plus(6, 8);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 8),
                IteratorStart.LT, 8, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LE, 6),
                IteratorStart.LE, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 8));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(4, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(4, pit.peekNextKey());
        Assertions.assertFalse(pit.hasPrev());
        Assertions.assertEquals(Pair.of(4, 6), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
    }

    @Test
    public void itTest4() {
        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6).plus(5, 8).plus(6, 10);
        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 5),
                IteratorStart.LT, 5, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(4, 6), Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(5, 8), Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 7),
                IteratorStart.LT, 7, v -> (v % 2 == 0) ? v : null);
        Just.checkIterator(pit, Pair.of(6, 10));
        Assertions.assertFalse(pit.hasNext());
        Assertions.assertTrue(pit.hasPrev());
        Assertions.assertEquals(6, pit.peekPrevKey());
        Assertions.assertEquals(Pair.of(6, 10), pit.prev());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());

        pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 6),
                IteratorStart.LT, 6, v -> (v % 2 == 0) ? v : null);
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(5, pit.peekNextKey());
        Assertions.assertTrue(pit.hasPrev());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(5, pit.peekNextKey());
        Assertions.assertEquals(4, pit.peekPrevKey());
        Assertions.assertEquals(Pair.of(5, 8), pit.next());
        Assertions.assertTrue(pit.hasNext());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(5, pit.peekPrevKey());
        Assertions.assertEquals(6, pit.peekNextKey());
        Assertions.assertEquals(5, pit.peekPrevKey());
    }

//    @Test
//    public void reverseTest() {
//        var source1 = TreePMap.<Integer, Integer>empty().plus(1, 3).plus(3, 5).plus(4, 6);
//        var pit = new PredicateKvIterator<>(new NavigableMapKvIterator<>(source1, IteratorStart.LT, 4),
//                IteratorStart.LT, 4, v -> (v % 2 == 0) ? v : null);
//
//    }
}
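The deleted tests above pinned down PredicateKvIterator's mapping rule: every value is passed through a function and entries that map to null are skipped, which is why only even values such as (4, 6) survive `v -> (v % 2 == 0) ? v : null`. A minimal oracle sketch of the same rule (an illustration, not the removed class):

    // Hedged sketch: map values, drop entries whose mapped value is null.
    static <K extends Comparable<K>, V> NavigableMap<K, V> mapDroppingNulls(NavigableMap<K, V> source, Function<V, V> fn) {
        NavigableMap<K, V> out = new TreeMap<>();
        source.forEach((k, v) -> {
            V mapped = fn.apply(v);
            if (mapped != null)
                out.put(k, mapped);
        });
        return out;
    }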
@@ -0,0 +1,275 @@
package com.usatiuk.objects.iterators;

import net.jqwik.api.*;
import net.jqwik.api.state.Action;
import net.jqwik.api.state.ActionChain;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class TombstoneSkippingIteratorPbtTest {
    @Property
    public void checkMergingIterator(@ForAll("actions") ActionChain<MergingIteratorModel> actions) {
        actions.run();
    }

    @Provide
    Arbitrary<ActionChain<MergingIteratorModel>> actions(@ForAll("lists") List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>> list,
                                                         @ForAll IteratorStart iteratorStart, @ForAll("startKey") Integer startKey) {
        return ActionChain.startWith(() -> new MergingIteratorModel(list, iteratorStart, startKey))
                .withAction(new NextAction())
                .withAction(new PeekNextKeyAction())
                .withAction(new SkipAction())
                .withAction(new PeekPrevKeyAction())
                .withAction(new SkipPrevAction())
                .withAction(new PrevAction())
                .withAction(new HasNextAction())
                .withAction(new HasPrevAction());
    }

    @Provide
    Arbitrary<List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>>> lists() {
        return Arbitraries.entries(Arbitraries.integers().between(-50, 50),
                        Arbitraries.integers().between(-50, 50).flatMap(i -> Arbitraries.of(true, false).<MaybeTombstone<Integer>>flatMap(
                                b -> b ? Arbitraries.just(new DataWrapper<Integer>(i)) : Arbitraries.just(new TombstoneImpl<>())
                        ))
                )
                .list().uniqueElements(Map.Entry::getKey).ofMinSize(0).ofMaxSize(20)
                .list().ofMinSize(1).ofMaxSize(5);
    }

    @Provide
    Arbitrary<Integer> startKey() {
        return Arbitraries.integers().between(-51, 51);
    }

    static class MergingIteratorModel implements CloseableKvIterator<Integer, Integer> {
        private final CloseableKvIterator<Integer, Integer> mergedIterator;
        private final CloseableKvIterator<Integer, Integer> mergingIterator;

        private MergingIteratorModel(List<List<Map.Entry<Integer, MaybeTombstone<Integer>>>> pairs, IteratorStart startType, Integer startKey) {
            TreeMap<Integer, MaybeTombstone<Integer>> perfectMergedTombstones = new TreeMap<>();
            for (List<Map.Entry<Integer, MaybeTombstone<Integer>>> list : pairs) {
                for (Map.Entry<Integer, MaybeTombstone<Integer>> pair : list) {
                    perfectMergedTombstones.putIfAbsent(pair.getKey(), pair.getValue());
                }
            }
            TreeMap<Integer, Integer> perfectMerged = new TreeMap<>();
            for (var e : perfectMergedTombstones.entrySet()) {
                if (e.getValue() instanceof Data<Integer> data)
                    perfectMerged.put(e.getKey(), data.value());
            }

            mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
            mergingIterator = new TombstoneSkippingIterator<>(startType, startKey, pairs.stream().<CloseableKvIterator<Integer, MaybeTombstone<Integer>>>map(
                    list -> new NavigableMapKvIterator<Integer, MaybeTombstone<Integer>>(new TreeMap<Integer, MaybeTombstone<Integer>>(Map.ofEntries(list.toArray(Map.Entry[]::new))), startType, startKey)
            ).toList());
        }

        @Override
        public Integer peekNextKey() {
            var mergedKey = mergedIterator.peekNextKey();
            var mergingKey = mergingIterator.peekNextKey();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }

        @Override
        public void skip() {
            mergedIterator.skip();
            mergingIterator.skip();
        }

        @Override
        public Integer peekPrevKey() {
            var mergedKey = mergedIterator.peekPrevKey();
            var mergingKey = mergingIterator.peekPrevKey();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }

        @Override
        public Pair<Integer, Integer> prev() {
            var mergedKey = mergedIterator.prev();
            var mergingKey = mergingIterator.prev();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }

        @Override
        public boolean hasPrev() {
            var mergedKey = mergedIterator.hasPrev();
            var mergingKey = mergingIterator.hasPrev();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }

        @Override
        public void skipPrev() {
            mergedIterator.skipPrev();
            mergingIterator.skipPrev();
        }

        @Override
        public void close() {
            mergedIterator.close();
            mergingIterator.close();
        }

        @Override
        public boolean hasNext() {
            var mergedKey = mergedIterator.hasNext();
            var mergingKey = mergingIterator.hasNext();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }

        @Override
        public Pair<Integer, Integer> next() {
            var mergedKey = mergedIterator.next();
            var mergingKey = mergingIterator.next();
            Assertions.assertEquals(mergedKey, mergingKey);
            return mergedKey;
        }
    }

    static class PeekNextKeyAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.peekNextKey();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasNext();
        }

        @Override
        public String description() {
            return "Peek next key";
        }
    }

    static class SkipAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.skip();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasNext();
        }

        @Override
        public String description() {
            return "Skip next key";
        }
    }

    static class PeekPrevKeyAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.peekPrevKey();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasPrev();
        }

        @Override
        public String description() {
            return "Peek prev key";
        }
    }

    static class SkipPrevAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.skipPrev();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasPrev();
        }

        @Override
        public String description() {
            return "Skip prev key";
        }
    }

    static class PrevAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.prev();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasPrev();
        }

        @Override
        public String description() {
            return "Prev key";
        }
    }

    static class NextAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.next();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return state.hasNext();
        }

        @Override
        public String description() {
            return "Next key";
        }
    }

    static class HasNextAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.hasNext();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return true;
        }

        @Override
        public String description() {
            return "Has next key";
        }
    }

    static class HasPrevAction extends Action.JustMutate<MergingIteratorModel> {
        @Override
        public void mutate(MergingIteratorModel state) {
            state.hasPrev();
        }

        @Override
        public boolean precondition(MergingIteratorModel state) {
            return true;
        }

        @Override
        public String description() {
            return "Has prev key";
        }
    }
}
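The new property test replaces the removed example-based suites with the oracle pattern: the tombstone-carrying sources are first merged into a plain TreeMap (first source wins, tombstones dropped), and every iterator operation is then executed against both the model and the implementation, asserting they agree. The same skeleton works for any iterator under test; a hedged outline (makeIteratorUnderTest is a hypothetical factory):

    // Oracle-pattern outline: drive a trivially correct model and the real
    // implementation with identical operations and compare the results.
    var model = new NavigableMapKvIterator<>(expectedMap, start, startKey);
    var impl = makeIteratorUnderTest(start, startKey); // hypothetical
    while (true) {
        Assertions.assertEquals(model.hasNext(), impl.hasNext());
        if (!model.hasNext()) break;
        Assertions.assertEquals(model.next(), impl.next());
    }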
@@ -1,129 +0,0 @@
package com.usatiuk.objects.stores;


import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.Just;
import com.usatiuk.objects.TempDataProfile;
import com.usatiuk.objects.iterators.IteratorStart;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;

import java.util.List;

class Profiles {
    public static class LmdbKvIteratorTestProfile extends TempDataProfile {
    }
}

@QuarkusTest
@TestProfile(Profiles.LmdbKvIteratorTestProfile.class)
public class LmdbKvIteratorTest {

    @Inject
    LmdbObjectPersistentStore store;

    long getNextTxId() {
        try (var s = store.getSnapshot()) {
            return s.id() + 1;
        }
    }

    @RepeatedTest(100)
    public void iteratorTest1() {
        store.prepareTx(
                new TxManifestRaw(
                        List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
                                Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
                                Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))),
                        List.of()
                ), getNextTxId()
        ).run();

        try (var snapshot = store.getSnapshot()) {
            var iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(""));
            Just.checkIterator(iterator, List.of(Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})),
                    Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})),
                    Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4}))));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(3)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(2)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(2)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(3)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(2)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LT, JObjectKey.of(Long.toString(1)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(1)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(3)));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.GT, JObjectKey.of(Long.toString(4)));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.LE, JObjectKey.of(Long.toString(0)));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertFalse(iterator.hasNext());
            iterator.close();

            iterator = snapshot.getIterator(IteratorStart.GE, JObjectKey.of(Long.toString(2)));
            Assertions.assertTrue(iterator.hasNext());
            Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
            Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
            Assertions.assertEquals(JObjectKey.of(Long.toString(2)), iterator.peekNextKey());
            Assertions.assertEquals(JObjectKey.of(Long.toString(1)), iterator.peekPrevKey());
            Just.checkIterator(iterator.reversed(), Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})));
            Just.checkIterator(iterator, Pair.of(JObjectKey.of(Long.toString(1)), ByteString.copyFrom(new byte[]{2})), Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})));
            Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(3)), ByteString.copyFrom(new byte[]{4})), iterator.prev());
            Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.prev());
            Assertions.assertEquals(Pair.of(JObjectKey.of(Long.toString(2)), ByteString.copyFrom(new byte[]{3})), iterator.next());
            iterator.close();
        }

        store.prepareTx(new TxManifestRaw(
                        List.of(),
                        List.of(JObjectKey.of(Long.toString(1)), JObjectKey.of(Long.toString(2)), JObjectKey.of(Long.toString(3)))
                ),
                getNextTxId()
        ).run();
    }
}
@@ -102,6 +102,7 @@
                        <arg>-parameters</arg>
                        <arg>--add-exports</arg>
                        <arg>java.base/jdk.internal.access=ALL-UNNAMED</arg>
                        <arg>--enable-preview</arg>
                    </compilerArgs>
                </configuration>
            </plugin>
@@ -119,6 +120,7 @@
                        --add-exports java.base/sun.nio.ch=ALL-UNNAMED
                        --add-exports java.base/jdk.internal.access=ALL-UNNAMED
                        --add-opens=java.base/java.nio=ALL-UNNAMED
                        --enable-preview
                    </argLine>
                    <skipTests>${skip.unit}</skipTests>
                    <redirectTestOutputToFile>true</redirectTestOutputToFile>

@@ -19,8 +19,6 @@ import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -56,27 +54,21 @@ public class AutosyncProcessor {
        if (downloadAll)
            executorService.submit(() -> {
                Log.info("Adding all to autosync");
                List<JObjectKey> objs = new LinkedList<>();
                txm.run(() -> {
                    try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                        while (it.hasNext()) {
                            var key = it.peekNextKey();
                            objs.add(key);
                            // TODO: Nested transactions
                            txm.run(() -> {
                                var gotObj = curTx.get(JData.class, key).orElse(null);
                                if (!(gotObj instanceof RemoteObjectMeta meta))
                                    return;
                                if (!meta.hasLocalData())
                                    add(meta.key());
                            }, true);
                            it.skip();
                        }
                    }
                });

                for (var obj : objs) {
                    txm.run(() -> {
                        var gotObj = curTx.get(JData.class, obj).orElse(null);
                        if (!(gotObj instanceof RemoteObjectMeta meta))
                            return;
                        if (!meta.hasLocalData())
                            add(meta.key());
                    });
                }
                Log.info("Adding all to autosync: finished");
            });
    }

@@ -1,13 +1,14 @@
package com.usatiuk.dhfs.invalidation;

import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.pcollections.PMap;

import java.util.Collection;
import java.util.List;

public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog) implements Op {
public record IndexUpdateOp(JObjectKey key, PMap<PeerId, Long> changelog, JDataRemoteDto data) implements Op {
    @Override
    public Collection<JObjectKey> getEscapedRefs() {
        return List.of(key);

@@ -1,10 +1,14 @@
package com.usatiuk.dhfs.invalidation;

import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfs.syncmap.DtoMapperService;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
@@ -19,11 +23,22 @@ public class RemoteObjectMetaOpExtractor implements OpExtractor<RemoteObjectMeta
    Transaction curTx;
    @Inject
    RemoteTransaction remoteTransaction;
    @Inject
    DtoMapperService dtoMapperService;

    @Override
    public Pair<List<Op>, Runnable> extractOps(RemoteObjectMeta data, PeerId peerId) {
        return txm.run(() -> {
            return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog())), () -> {
            JDataRemoteDto dto =
                    data.knownType().isAnnotationPresent(JDataRemotePush.class)
                            ? remoteTransaction.getData(data.knownType(), data.key())
                            .map(d -> dtoMapperService.toDto(d, d.dtoClass())).orElse(null)
                            : null;

            if (data.knownType().isAnnotationPresent(JDataRemotePush.class) && dto == null) {
                Log.warnv("Failed to get data for push {0} of type {1}", data.key(), data.knownType());
            }
            return Pair.of(List.of(new IndexUpdateOp(data.key(), data.changelog(), dto)), () -> {
            });
        });
    }

@@ -54,11 +54,11 @@ public class JKleppmannTreeManager {
            );
            curTx.put(data);
            var rootNode = new JKleppmannTreeNode(JObjectKey.of(name.value() + "_jt_root"), null, rootNodeSupplier.get());
            curTx.put(new JKleppmannTreeNodeHolder(rootNode));
            curTx.put(new JKleppmannTreeNodeHolder(rootNode, true));
            var trashNode = new JKleppmannTreeNode(JObjectKey.of(name.value() + "_jt_trash"), null, rootNodeSupplier.get());
            curTx.put(new JKleppmannTreeNodeHolder(trashNode));
            curTx.put(new JKleppmannTreeNodeHolder(trashNode, true));
            var lf_node = new JKleppmannTreeNode(JObjectKey.of(name.value() + "_jt_lf"), null, rootNodeSupplier.get());
            curTx.put(new JKleppmannTreeNodeHolder(lf_node));
            curTx.put(new JKleppmannTreeNodeHolder(lf_node, true));
        }
        return new JKleppmannTree(data);
//            opObjectRegistry.registerObject(tree);

@@ -24,6 +24,6 @@ public class JKleppmannTreePeerInterface implements PeerInterface<PeerId> {

    @Override
    public Collection<PeerId> getAllPeers() {
        return peerInfoService.getPeers().stream().map(PeerInfo::id).toList();
        return peerInfoService.getSynchronizedPeers().stream().map(PeerInfo::id).toList();
    }
}

@@ -18,6 +18,10 @@ public record JKleppmannTreeNodeHolder(PCollection<JDataRef> refsFrom, boolean f
        this(TreePSet.empty(), false, node);
    }

    public JKleppmannTreeNodeHolder(JKleppmannTreeNode node, boolean frozen) {
        this(TreePSet.empty(), frozen, node);
    }

    public JKleppmannTreeNodeHolder withNode(JKleppmannTreeNode node) {
        Objects.requireNonNull(node, "node");
        return new JKleppmannTreeNodeHolder(refsFrom, frozen, node);

@@ -4,24 +4,48 @@ import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.peertrust.CertificateTools;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.remoteobj.JDataRemotePush;
import com.usatiuk.objects.JObjectKey;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;

import java.security.cert.X509Certificate;

public record PeerInfo(JObjectKey key, PeerId id, ByteString cert) implements JDataRemote, JDataRemoteDto {
@JDataRemotePush
public record PeerInfo(JObjectKey key, PeerId id, ByteString cert,
                       PMap<PeerId, Long> kickCounter,
                       long lastSeenTimestamp) implements JDataRemote, JDataRemoteDto {
    public PeerInfo(PeerId id, byte[] cert) {
        this(id.toJObjectKey(), id, ByteString.copyFrom(cert));
        this(id.toJObjectKey(), id, ByteString.copyFrom(cert), HashTreePMap.empty(), System.currentTimeMillis());
    }

    public X509Certificate parsedCert() {
        return CertificateTools.certFromBytes(cert.toByteArray());
    }

    public PeerInfo withKickCounter(PMap<PeerId, Long> kickCounter) {
        return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
    }

    public PeerInfo withIncrementedKickCounter(PeerId peerId) {
        return new PeerInfo(key, id, cert, kickCounter.plus(peerId, kickCounter.getOrDefault(peerId, 0L) + 1), lastSeenTimestamp);
    }

    public PeerInfo withLastSeenTimestamp(long lastSeenTimestamp) {
        return new PeerInfo(key, id, cert, kickCounter, lastSeenTimestamp);
    }

    public long kickCounterSum() {
        return kickCounter.values().stream().mapToLong(Long::longValue).sum();
    }

    @Override
    public String toString() {
        return "PeerInfo{" +
                "key=" + key +
                ", id=" + id +
                ", kickCounter=" + kickCounter +
                ", lastSeenTimestamp=" + lastSeenTimestamp +
                '}';
    }
}

@@ -87,17 +87,27 @@ public class PeerInfoService {

    public List<PeerInfo> getPeersNoSelf() {
        return jObjectTxManager.run(() -> {
            var gotKey = getTreeR().traverse(List.of());
            return curTx.get(JKleppmannTreeNodeHolder.class, gotKey).map(JKleppmannTreeNodeHolder::node).map(
                    node -> node.children().keySet().stream()
                            .map(JObjectKey::of).map(this::getPeerInfoImpl)
                            .filter(o -> {
                                if (o.isEmpty())
                                    Log.warnv("Could not get peer info for peer {0}", o);
                                return o.isPresent();
                            }).map(Optional::get).filter(
                                    peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList())
                    .orElseThrow();
            return getPeers().stream().filter(
                    peerInfo -> !peerInfo.id().equals(persistentPeerDataService.getSelfUuid())).toList();
        });
    }

    public List<PeerInfo> getSynchronizedPeers() {
        return jObjectTxManager.run(() -> {
            return getPeers().stream().filter(pi -> {
                if (pi.id().equals(persistentPeerDataService.getSelfUuid())) {
                    return true;
                }
                return persistentPeerDataService.isInitialSyncDone(pi.id());
            }).toList();
        });
    }

    public List<PeerInfo> getSynchronizedPeersNoSelf() {
        return jObjectTxManager.run(() -> {
            return getPeersNoSelf().stream().filter(pi -> {
                return persistentPeerDataService.isInitialSyncDone(pi.id());
            }).toList();
        });
    }

@@ -89,10 +89,11 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
            if (oursCurData == null)
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));

            if (!receivedData.equals(oursCurData))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo data conflict"));
            if (!receivedData.cert().equals(oursCurData.cert()))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("PeerInfo certificate conflict for " + key));

            HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(current.changelog());
            HashPMap<PeerId, Long> newKickCounter = HashTreePMap.from(oursCurData.kickCounter());

            for (var entry : receivedChangelog.entrySet()) {
                newChangelog = newChangelog.plus(entry.getKey(),
@@ -100,6 +101,20 @@ public class PeerInfoSyncHandler implements ObjSyncHandler<PeerInfo, PeerInfo> {
                );
            }

            for (var entry : receivedData.kickCounter().entrySet()) {
                newKickCounter = newKickCounter.plus(entry.getKey(),
                        Long.max(newKickCounter.getOrDefault(entry.getKey(), 0L), entry.getValue())
                );
            }

            var newData = oursCurData.withKickCounter(newKickCounter)
                    .withLastSeenTimestamp(Math.max(oursCurData.lastSeenTimestamp(), receivedData.lastSeenTimestamp()));

            if (!newData.equals(oursCurData))
                newChangelog = newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1L);

            remoteTx.putDataRaw(newData);

            current = current.withChangelog(newChangelog);
        }
    }

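Both the changelog and the kick counter above are merged entry-wise with Long.max, a grow-only join: the merge is commutative, associative, and idempotent, so peers applying updates in different orders converge on the same counters. In isolation the rule is (a sketch, assuming pcollections' PMap as in the code above):

    // Pointwise-max merge of two counter maps; the order of arguments does
    // not affect the result.
    static PMap<PeerId, Long> mergeCounters(PMap<PeerId, Long> a, PMap<PeerId, Long> b) {
        var merged = a;
        for (var e : b.entrySet()) {
            merged = merged.plus(e.getKey(), Long.max(merged.getOrDefault(e.getKey(), 0L), e.getValue()));
        }
        return merged;
    }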
@@ -0,0 +1,60 @@
package com.usatiuk.dhfs.peersync;

import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import io.quarkus.scheduler.Scheduled;
import io.smallrye.common.annotation.Blocking;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;

@ApplicationScoped
public class PeerLastSeenUpdater {
    @Inject
    PeerManager peerManager;
    @Inject
    PeerInfoService peerInfoService;
    @Inject
    Transaction curTx;
    @Inject
    TransactionManager txm;
    @Inject
    RemoteTransaction remoteTransaction;

    @ConfigProperty(name = "dhfs.objects.last-seen.timeout")
    long lastSeenTimeout;
    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Scheduled(every = "${dhfs.objects.last-seen.update}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
    @Blocking
    void update() {
        var snapshot = peerManager.getHostStateSnapshot();
        for (var a : snapshot.available()) {
            txm.run(() -> {
                var curInfo = remoteTransaction.getData(PeerInfo.class, a.id()).orElse(null);
                if (curInfo == null) return;

                var newInfo = curInfo.withLastSeenTimestamp(System.currentTimeMillis());
                remoteTransaction.putData(newInfo);
            });
        }

        for (var u : snapshot.unavailable()) {
            txm.run(() -> {
                if (!persistentPeerDataService.isInitialSyncDone(u))
                    return;

                var curInfo = remoteTransaction.getData(PeerInfo.class, u.id()).orElse(null);
                if (curInfo == null) return;

                var lastSeen = curInfo.lastSeenTimestamp();
                if (System.currentTimeMillis() - lastSeen > (lastSeenTimeout * 1000)) {
                    var kicked = curInfo.withIncrementedKickCounter(persistentPeerDataService.getSelfUuid());
                    remoteTransaction.putData(kicked);
                }
            });
        }
    }
}
@@ -125,19 +125,23 @@ public class PeerManager {
        }
    }

    public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
    public void handleConnectionError(PeerId host) {
        boolean wasReachable = isReachable(host);

        if (wasReachable)
            Log.infov("Lost connection to {0}", host);

        _states.remove(host.id());
        _states.remove(host);

        for (var l : _disconnectedListeners) {
            l.handlePeerDisconnected(host.id());
            l.handlePeerDisconnected(host);
        }
    }

    public void handleConnectionError(com.usatiuk.dhfs.peersync.PeerInfo host) {
        handleConnectionError(host.id());
    }

    // FIXME:
    private boolean pingCheck(com.usatiuk.dhfs.peersync.PeerInfo host, PeerAddress address) {
        try {

@@ -51,6 +51,8 @@ public class PersistentPeerDataService {
    PeerInfoService peerInfoService;
    @Inject
    TransactionManager txm;
    @Inject
    PeerManager peerManager;

    @ConfigProperty(name = "dhfs.peerdiscovery.preset-uuid")
    Optional<String> presetUuid;
@@ -89,21 +91,6 @@ public class PersistentPeerDataService {
            Files.write(Path.of(stuffRoot, "self_uuid"), _selfUuid.id().toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        }

//    private void pushPeerUpdates() {
//        pushPeerUpdates(null);
//    }

//    private void pushPeerUpdates(@Nullable JObject<?> obj) {
//        if (obj != null)
//            Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated");
//        executorService.submit(() -> {
//            updateCerts();
//            invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName);
//            for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()))
//                invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p));
//        });
//    }

    public PeerId getSelfUuid() {
        return _selfUuid;
    }
@@ -148,6 +135,7 @@ public class PersistentPeerDataService {
            }
            curTx.put(data.withInitialSyncDone(data.initialSyncDone().minus(peerId)));
            Log.infov("Did reset sync state for {0}", peerId);
            curTx.onCommit(() -> peerManager.handleConnectionError(peerId));
            return true;
        });
    }
@@ -160,7 +148,6 @@ public class PersistentPeerDataService {
        });
    }


    public List<IpPeerAddress> getPersistentPeerAddresses() {
        return txm.run(() -> {
            var data = curTx.get(PersistentRemoteHostsData.class, PersistentRemoteHostsData.KEY).orElse(null);

@@ -1,7 +1,6 @@
package com.usatiuk.dhfs.peertrust;

import com.usatiuk.dhfs.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.peersync.PeerInfo;
import com.usatiuk.dhfs.peersync.PeerInfoService;
@@ -43,7 +42,7 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
        for (var curRef : oldNode.node().children().entrySet()) {
            if (!n.node().children().containsKey(curRef.getKey())) {
                Log.infov("Will reset sync state for {0}", curRef.getValue());
                curTx.onCommit(() -> persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue())));
                persistentPeerDataService.resetInitialSyncDone(JKleppmannTreeNodeMetaPeer.nodeIdToPeerId(curRef.getValue()));
            }
        }
        return;
@@ -54,9 +53,16 @@ public class PeerInfoCertUpdateTxHook implements PreCommitTxHook {
            return;
        }

        if (!(remote.data() instanceof PeerInfo))
        if (!(remote.data() instanceof PeerInfo curPi))
            return;

        var oldPi = (PeerInfo) ((RemoteObjectDataWrapper) old).data();

        if (oldPi.kickCounterSum() != curPi.kickCounterSum()) {
            Log.warnv("Peer kicked out: {0} to {1}", key, cur);
            persistentPeerDataService.resetInitialSyncDone(curPi.id());
        }

        Log.infov("Changed peer info: {0} to {1}", key, cur);

        curTx.onCommit(() -> invalidationQueueService.pushInvalidationToAll(key));

@@ -20,7 +20,7 @@ public class IndexUpdateOpHandler implements OpHandler<IndexUpdateOp> {
    @Override
    public void handleOp(PeerId from, IndexUpdateOp op) {
        txm.run(() -> {
            syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), null);
            syncHandler.handleRemoteUpdate(from, op.key(), op.changelog(), op.data());
        });
    }
}

@@ -0,0 +1,11 @@
package com.usatiuk.dhfs.remoteobj;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface JDataRemotePush {
}
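The marker is consumed in RemoteObjectMetaOpExtractor above: annotated types have their DTO embedded in the outgoing IndexUpdateOp instead of being fetched by the receiver on demand, as PeerInfo now does. Opting a type in is just (hypothetical record, for illustration only):

    // Sketch: a remote record whose data is pushed along with index updates.
    @JDataRemotePush
    public record ExampleRemoteData(JObjectKey key) implements JDataRemote, JDataRemoteDto {
    }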
@@ -19,8 +19,8 @@ public class RemoteObjPusherTxHook implements PreCommitTxHook {
    @Override
    public void onChange(JObjectKey key, JData old, JData cur) {
        boolean invalidate = switch (cur) {
            case RemoteObjectMeta remote -> !remote.changelog().equals(((RemoteObjectMeta) old).changelog());
            case JKleppmannTreePersistentData pd -> !pd.queues().equals(((JKleppmannTreePersistentData) old).queues());
            case RemoteObjectMeta remote -> remote.changelog() != ((RemoteObjectMeta) old).changelog();
            case JKleppmannTreePersistentData pd -> pd.queues() != ((JKleppmannTreePersistentData) old).queues();
            default -> false;
        };

@@ -132,7 +132,7 @@ public class RemoteObjectDeleter {
            return true;
        }

        var knownHosts = peerInfoService.getPeersNoSelf();
        var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
        RemoteObjectMeta finalTarget = target;
        List<PeerId> missing = knownHosts.stream()
                .map(PeerInfo::id)
@@ -187,7 +187,7 @@ public class RemoteObjectDeleter {
        if (!obj.seen())
            return true;

        var knownHosts = peerInfoService.getPeersNoSelf();
        var knownHosts = peerInfoService.getSynchronizedPeersNoSelf();
        boolean missing = false;
        for (var x : knownHosts) {
            if (!obj.confirmedDeletes().contains(x.id())) {

@@ -93,6 +93,11 @@ public class RemoteTransaction {
        curTx.put(newData);
    }

    public <T extends JDataRemote> void putDataNew(T obj) {
        curTx.putNew(new RemoteObjectMeta(obj, persistentPeerDataService.getSelfUuid()));
        curTx.putNew(new RemoteObjectDataWrapper<>(obj));
    }

    public <T extends JDataRemote> void putData(T obj) {
        var curMeta = getMeta(obj.key()).orElse(null);

@@ -23,7 +23,10 @@ import org.pcollections.PMap;

import javax.annotation.Nullable;
import java.lang.reflect.ParameterizedType;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Stream;

@ApplicationScoped
@@ -124,52 +127,42 @@ public class SyncHandler {
    public void resyncAfterCrash(@Observes @Priority(100000) StartupEvent event) {
        if (shutdownChecker.lastShutdownClean())
            return;
        List<JObjectKey> objs = new LinkedList<>();
        txm.run(() -> {
            try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                while (it.hasNext()) {
                    var key = it.peekNextKey();
                    objs.add(key);
                    // TODO: Nested transactions
                    txm.run(() -> {
                        var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
                        if (proc != null) {
                            proc.handleCrash(key);
                        }
                        Log.infov("Handled crash of {0}", key);
                    }, true);
                    it.skip();
                }
            }
        });

        for (var obj : objs) {
            txm.run(() -> {
                var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
                if (proc != null) {
                    proc.handleCrash(obj);
                }
                Log.infov("Handled crash of {0}", obj);
            });
        }
    }

    public void doInitialSync(PeerId peer) {
        List<JObjectKey> objs = new LinkedList<>();
        txm.run(() -> {
            Log.tracev("Will do initial sync for {0}", peer);
            try (var it = curTx.getIterator(IteratorStart.GE, JObjectKey.first())) {
                while (it.hasNext()) {
                    var key = it.peekNextKey();
                    objs.add(key);
                    // TODO: Nested transactions
                    txm.run(() -> {
                        var proc = curTx.get(JData.class, key).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
                        if (proc != null) {
                            proc.prepareForInitialSync(peer, key);
                        }
                        Log.infov("Adding to initial sync for peer {0}: {1}", peer, key);
                        invalidationQueueService.pushInvalidationToOne(peer, key);
                    }, true);
                    it.skip();
                }
            }
        });

        for (var obj : objs) {
            txm.run(() -> {
                var proc = curTx.get(JData.class, obj).flatMap(o -> Optional.ofNullable(_initialSyncProcessors.get(o.getClass()))).orElse(null);
                if (proc != null) {
                    proc.prepareForInitialSync(peer, obj);
                }
                Log.infov("Adding to initial sync for peer {0}: {1}", peer, obj);
                invalidationQueueService.pushInvalidationToOne(peer, obj);
            });
        }
    }
}
@@ -4,93 +4,52 @@ import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;

import java.lang.ref.Cleaner;
import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class DataLocker {
    private static final AutoCloseableNoThrow DUMMY_LOCK = () -> {
    };
    private final ConcurrentHashMap<Object, LockTag> _locks = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<Object, WeakReference<ReentrantLock>> _locks = new ConcurrentHashMap<>();
    private static final Cleaner CLEANER = Cleaner.create();

    private Lock getTag(Object data) {
        var newTag = new ReentrantLock();
        var newTagRef = new WeakReference<>(newTag);

        while (true) {
            var oldTagRef = _locks.putIfAbsent(data, newTagRef);
            var oldTag = oldTagRef != null ? oldTagRef.get() : null;

            if (oldTag == null && oldTagRef != null) {
                _locks.remove(data, oldTagRef);
                continue;
            }

            if (oldTag != null)
                return oldTag;

            CLEANER.register(newTag, () -> {
                _locks.remove(data, newTagRef);
            });
            return newTag;
        }
    }

    @Nonnull
    public AutoCloseableNoThrow lock(Object data) {
        while (true) {
            var newTag = new LockTag();
            var oldTag = _locks.putIfAbsent(data, newTag);
            if (oldTag == null) {
                return new Lock(data, newTag);
            }
            try {
                synchronized (oldTag) {
                    while (!oldTag.released) {
                        if (oldTag.owner == Thread.currentThread()) {
                            return DUMMY_LOCK;
                        }
                        oldTag.wait();
//                        tag.wait(4000L);
//                        if (!tag.released) {
//                            System.out.println("Timeout waiting for lock: " + data);
//                            System.exit(1);
//                            throw new InterruptedException();
//                        }
                    }
                }
            } catch (InterruptedException ignored) {
            }
        }
        var lock = getTag(data);
        lock.lock();
        return lock::unlock;
    }

    @Nullable
    public AutoCloseableNoThrow tryLock(Object data) {
        while (true) {
            var newTag = new LockTag();
            var oldTag = _locks.putIfAbsent(data, newTag);
            if (oldTag == null) {
                return new Lock(data, newTag);
            }
            synchronized (oldTag) {
                if (!oldTag.released) {
                    if (oldTag.owner == Thread.currentThread()) {
                        return DUMMY_LOCK;
                    }
                    return null;
                }
            }
        var lock = getTag(data);
        if (lock.tryLock()) {
            return lock::unlock;
        } else {
            return null;
        }
    }

    private static class LockTag {
        final Thread owner = Thread.currentThread();
//        final StackTraceElement[] _creationStack = Thread.currentThread().getStackTrace();
        boolean released = false;
    }

    private class Lock implements AutoCloseableNoThrow {
        private static final Cleaner CLEANER = Cleaner.create();
        private final Object _key;
        private final LockTag _tag;

        public Lock(Object key, LockTag tag) {
            _key = key;
            _tag = tag;
//            CLEANER.register(this, () -> {
//                if (!tag.released) {
//                    Log.error("Lock collected without release: " + key);
//                }
//            });
        }

        @Override
        public void close() {
            synchronized (_tag) {
                if (_tag.released)
                    return;
                _tag.released = true;
                // Notify all because when the object is locked again,
                // it's a different lock tag
                _tag.notifyAll();
                _locks.remove(_key, _tag);
            }
        }
    }

}
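The rewrite ties each lock's lifetime to its ReentrantLock: the map holds only weak references, and the Cleaner removes the entry once the lock object becomes unreachable, so the hand-rolled wait/notify machinery of the old LockTag scheme goes away while per-thread re-entrancy is kept. A typical call site (a sketch; `dataLocker` and `key` are placeholders):

    // The returned handle is AutoCloseableNoThrow, so the critical section
    // is simply a try-with-resources block keyed by an arbitrary object.
    try (var ignored = dataLocker.lock(key)) {
        // mutate state associated with `key`
    }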
@@ -34,11 +34,9 @@ public class HashSetDelayedBlockingQueue<T> {
        synchronized (this) {
            if (_closed) throw new IllegalStateException("Adding to a queue that is closed!");

            if (_set.containsKey(el))
            if (_set.putIfAbsent(el, new SetElement<>(el, System.currentTimeMillis())) != null)
                return false;

            _set.put(el, new SetElement<>(el, System.currentTimeMillis()));

            this.notify();
            return true;
        }

@@ -0,0 +1,33 @@
package com.usatiuk.utils;

import java.util.List;
import java.util.function.Function;

public class ListUtils {

    public static <T, T_V> List<T_V> prependAndMap(T_V item, List<T> suffix, Function<T, T_V> suffixFn) {
        T_V[] arr = (T_V[]) new Object[suffix.size() + 1];
        arr[0] = item;
        for (int i = 0; i < suffix.size(); i++) {
            arr[i + 1] = suffixFn.apply(suffix.get(i));
        }
        return List.of(arr);
    }

    public static <T> List<T> prepend(T item, List<T> suffix) {
        T[] arr = (T[]) new Object[suffix.size() + 1];
        arr[0] = item;
        for (int i = 0; i < suffix.size(); i++) {
            arr[i + 1] = suffix.get(i);
        }
        return List.of(arr);
    }

    public static <T, T_V> List<T_V> map(List<T> suffix, Function<T, T_V> suffixFn) {
        T_V[] arr = (T_V[]) new Object[suffix.size()];
        for (int i = 0; i < suffix.size(); i++) {
            arr[i] = suffixFn.apply(suffix.get(i));
        }
        return List.of(arr);
    }
}
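These helpers build the result array once and wrap it with List.of, avoiding the intermediate copies of stream-based prepend/map chains; note that List.of rejects null elements, so the mapping function must not return null. Example use (values are illustrative):

    List<String> row = ListUtils.prepend("header", List.of("a", "b")); // [header, a, b]
    List<Integer> lengths = ListUtils.map(row, String::length);        // [6, 1, 1]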
@@ -0,0 +1,44 @@
package com.usatiuk.utils;

import java.lang.foreign.*;
import java.lang.invoke.MethodHandle;
import java.nio.ByteBuffer;
import java.util.function.Consumer;

public class UninitializedByteBuffer {
    private static final Linker LINKER = Linker.nativeLinker();
    private static final MethodHandle malloc = LINKER.downcallHandle(
            LINKER.defaultLookup().find("malloc").orElseThrow(),
            FunctionDescriptor.of(ValueLayout.ADDRESS, ValueLayout.JAVA_LONG)
    );
    private static final MethodHandle free = LINKER.downcallHandle(
            LINKER.defaultLookup().find("free").orElseThrow(),
            FunctionDescriptor.ofVoid(ValueLayout.ADDRESS)
    );

    public static ByteBuffer allocate(int capacity) {
        UnsafeAccessor.NIO.reserveMemory(capacity, capacity);

        MemorySegment segment = null;
        try {
            segment = (MemorySegment) malloc.invokeExact((long) capacity);
        } catch (Throwable e) {
            throw new RuntimeException(e);
        }

        Consumer<MemorySegment> cleanup = s -> {
            try {
                free.invokeExact(s);
                UnsafeAccessor.NIO.unreserveMemory(capacity, capacity);
            } catch (Throwable e) {
                throw new RuntimeException(e);
            }
        };
        var reint = segment.reinterpret(capacity, Arena.ofAuto(), cleanup);
        return reint.asByteBuffer();
    }

    public static long getAddress(ByteBuffer buffer) {
        return UnsafeAccessor.NIO.getBufferAddress(buffer);
    }
}
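Unlike ByteBuffer.allocateDirect, allocate hands back raw malloc memory, so the contents are uninitialized and must be fully written before they are read; reserveMemory/unreserveMemory keeps the buffer accounted against the direct-memory limit, and free runs via the auto Arena's cleanup once the segment becomes unreachable. A call-site sketch (`channel` is a hypothetical readable channel that fills the buffer before anything reads it):

    ByteBuffer buf = UninitializedByteBuffer.allocate(4096);
    int n = channel.read(buf); // write into the buffer before reading from it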
@@ -0,0 +1,23 @@
package com.usatiuk.utils;

import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import sun.misc.Unsafe;

import java.lang.reflect.Field;

public abstract class UnsafeAccessor {
    public static final JavaNioAccess NIO;
    public static final Unsafe UNSAFE;

    static {
        try {
            NIO = SharedSecrets.getJavaNioAccess();
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            UNSAFE = (Unsafe) f.get(null);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
@@ -25,6 +25,7 @@ echo "Extra options: $EXTRAOPTS_PARSED"

java \
    -Xmx512M \
    --enable-preview \
    -Ddhfs.objects.writeback.limit=134217728 \
    -Ddhfs.objects.lru.limit=134217728 \
    --add-exports java.base/sun.nio.ch=ALL-UNNAMED \
@@ -37,7 +38,7 @@ java \
    -Dquarkus.log.category.\"com.usatiuk\".level=INFO \
    -Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO \
    -Ddhfs.webui.root="$SCRIPT_DIR"/Webui $EXTRAOPTS_PARSED \
    -jar "$SCRIPT_DIR"/"DHFS Package"/quarkus-run.jar >quarkus.log 2>&1 &
    -jar "$SCRIPT_DIR"/"Server"/quarkus-run.jar >quarkus.log 2>&1 &

echo "Started $!"

48 run-wrapper/run.ps1 Normal file
@@ -0,0 +1,48 @@
$ErrorActionPreference = 'Stop'

$PIDFILE = Join-Path $PSScriptRoot ".pid"
$EXTRAOPTS = Join-Path $PSScriptRoot "extra-opts"

if (-Not (Test-Path $EXTRAOPTS)) {
    New-Item -ItemType File -Path $EXTRAOPTS | Out-Null
}

if (Test-Path $PIDFILE) {
    $ReadPID = Get-Content $PIDFILE
    if (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue) {
        Write-Host "Already running: $ReadPID"
        exit 2
    }
}

$ExtraOptsParsed = Get-Content $EXTRAOPTS | Where-Object { $_ }

Write-Host "Extra options: $($ExtraOptsParsed -join ' ')"

$JAVA_OPTS = @(
    "-Xmx512M"
    "--enable-preview"
    "-Ddhfs.objects.writeback.limit=134217728"
    "-Ddhfs.objects.lru.limit=134217728"
    "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED"
    "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED"
    "--add-opens=java.base/java.nio=ALL-UNNAMED"
    "-Ddhfs.objects.persistence.files.root=$($PSScriptRoot)\..\data\objects"
    "-Ddhfs.objects.persistence.stuff.root=$($PSScriptRoot)\..\data\stuff"
    "-Ddhfs.objects.persistence.lmdb.size=1000000000"
    "-Ddhfs.fuse.root=Z:\"
    "-Dquarkus.http.host=0.0.0.0"
    '-Dquarkus.log.category.\"com.usatiuk\".level=INFO'
    '-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=INFO'
    "-Ddhfs.webui.root=$($PSScriptRoot)\Webui"
) + $ExtraOptsParsed + @(
    "-jar", "`"$PSScriptRoot\Server\quarkus-run.jar`""
)

$Process = Start-Process -FilePath "java" -ArgumentList $JAVA_OPTS `
    -RedirectStandardOutput "$PSScriptRoot\quarkus.log" `
    -RedirectStandardError "$PSScriptRoot\quarkus.log.err" `
    -NoNewWindow -PassThru

Write-Host "Started $($Process.Id)"
$Process.Id | Out-File -FilePath $PIDFILE
24 run-wrapper/stop.ps1 Normal file
@@ -0,0 +1,24 @@
$ErrorActionPreference = 'Stop'

$PIDFILE = Join-Path $PSScriptRoot ".pid"

if (-Not (Test-Path $PIDFILE)) {
    Write-Host "Not running"
    exit 2
}

$ReadPID = Get-Content $PIDFILE

if (-Not (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue)) {
    Write-Host "Not running"
    Remove-Item $PIDFILE -Force
    exit 2
}

Write-Host "Killing $ReadPID"

# TODO: Graceful shutdown

Stop-Process -Id $ReadPID

Remove-Item $PIDFILE -Force
@@ -40,10 +40,10 @@ wget https://nightly.link/usatiuk/dhfs/actions/runs/$LATEST/Run%20wrapper.zip

unzip "Run wrapper.zip"
rm "Run wrapper.zip"
tar xvf "run-wrapper.tar.gz" --strip-components=2
tar xvf "run-wrapper.tar.gz" --strip-components 2
rm "run-wrapper.tar.gz"

rm -rf "DHFS Package"
rm -rf "Server"
rm -rf "Webui"
rm -rf "NativeLibs"
50 run-wrapper/update.ps1 Normal file
@@ -0,0 +1,50 @@
$ErrorActionPreference = 'Stop'

$PIDFILE = Join-Path $PSScriptRoot ".pid"
$VERSION_FILE = Join-Path $PSScriptRoot "version"

if (Test-Path $PIDFILE) {
    $ReadPID = Get-Content $PIDFILE
    if (Get-Process -Id $ReadPID -ErrorAction SilentlyContinue) {
        Write-Host "Already running: $ReadPID"
        exit 2
    }
}

$response = Invoke-RestMethod -Uri "https://api.github.com/repos/usatiuk/dhfs/actions/runs?branch=main&status=completed&per_page=1"

$LATEST = $response.workflow_runs[0].id
Write-Host "Latest: $LATEST"

$CUR = (Get-Content $VERSION_FILE -Raw).Trim()
Write-Host "Current: $CUR"

if ([long]$CUR -ge [long]$LATEST) {
    Write-Host "Already latest!"
    exit 1
}

Write-Host "Downloading..."

Set-Location $PSScriptRoot

$zipFile = "Run wrapper.zip"
$tarFile = "run-wrapper.tar.gz"
$dhfsDir = "dhfs"

Remove-Item $zipFile, $tarFile -Force -ErrorAction SilentlyContinue
Remove-Item $dhfsDir -Recurse -Force -ErrorAction SilentlyContinue

Invoke-WebRequest -Uri "https://nightly.link/usatiuk/dhfs/actions/runs/$LATEST/Run%20wrapper.zip" -OutFile $zipFile

Expand-Archive -LiteralPath $zipFile -DestinationPath $PSScriptRoot
Remove-Item $zipFile -Force

tar -xf $tarFile --strip-components=2
Remove-Item $tarFile -Force

Remove-Item "Server", "Webui", "NativeLibs" -Recurse -Force -ErrorAction SilentlyContinue
Move-Item "$dhfsDir\app\*" . -Force
Remove-Item $dhfsDir -Recurse -Force

# Assumed fix (not in the original script): record the new run id so the
# version check above can short-circuit the next invocation.
$LATEST | Out-File -FilePath $VERSION_FILE

Write-Host "Update complete!"