Mirror of https://github.com/usatiuk/dhfs.git, synced 2025-10-29 04:57:48 +01:00

Compare commits — 1 commit: b89b182c58 ... cursed-ite

| Author | SHA1 | Date |
|---|---|---|
| | 5cd0e5f045 | |

26 .github/workflows/server.yml (vendored)
@@ -20,21 +20,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: 'recursive'

- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'

- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2

- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf

- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH

- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v

- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -43,9 +48,6 @@ jobs:
distribution: "zulu"
cache: maven

- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh

- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify

3 .gitmodules (vendored)

@@ -1,3 +0,0 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git
@@ -1,17 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
<module name="dhfs-app"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:8080:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>
@@ -1,18 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main"/>
<module name="dhfs-app"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.app.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions --enable-preview -XX:+UseParallelGC -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=8080 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>
@@ -13,11 +13,6 @@
</parent>

<dependencies>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
@@ -77,6 +72,26 @@
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@@ -107,11 +122,31 @@
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
@@ -139,13 +174,16 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp;
package com.usatiuk.dhfs.app;

import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs;

import io.quarkus.test.junit.QuarkusTestProfile;

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp;
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -18,18 +18,6 @@ public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;

public static void purgeDirectory(File dir) {
try {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
} catch (Exception e) {
Log.error("Couldn't purge directory " + dir, e);
}
}

void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
@@ -41,4 +29,16 @@ public class TestDataCleaner {
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}

public static void purgeDirectory(File dir) {
try {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
} catch (Exception e) {
Log.error("Couldn't purge directory " + dir, e);
}
}
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfs.integration;

import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -32,11 +32,9 @@ public class DhfsFuseIT {
String c1uuid;
String c2uuid;

Network network;

@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
network = Network.newNetwork();
Network network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
@@ -67,39 +65,22 @@ public class DhfsFuseIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}

private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);

return ls1.equals(ls2) && cat1.equals(cat2);
});
}

@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}

@Test
@@ -264,8 +245,8 @@ public class DhfsFuseIT {
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
@@ -280,8 +261,8 @@ public class DhfsFuseIT {
container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

@@ -325,33 +306,6 @@ public class DhfsFuseIT {
});
}

@Test
void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);

await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());

client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();

Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
Log.warn("Connected");

checkConsistency();

var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
}

@Test
void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfs.integration;

import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -35,15 +35,13 @@ public class DhfsFusex3IT {
String c2uuid;
String c3uuid;

Network network;

// This calculation is somewhat racy, so keep it hardcoded for now
long emptyFileCount = 9;

@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
// TODO: Dedup
network = Network.newNetwork();
Network network = Network.newNetwork();

container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -93,26 +91,26 @@ public class DhfsFusex3IT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl1 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl3 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c3uuid);
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -133,7 +131,6 @@ public class DhfsFusex3IT {
@AfterEach
void stop() {
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop);
network.close();
}

@Test
@@ -193,8 +190,8 @@ public class DhfsFusex3IT {
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

Thread.sleep(10000);

@@ -255,22 +252,21 @@ public class DhfsFusex3IT {
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));

var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();

client.pauseContainerCmd(container1.getContainerId()).exec();
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses needed as otherwise docker buffers some incoming packets
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);

await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());

client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();

client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfs.integration;

import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -66,7 +66,7 @@ public class DhfsImage implements Future<String> {
.run("apt update && apt install -y libfuse2 curl gcc")
.copy("/app", "/app")
.copy("/libs", "/libs")
.cmd("java", "-ea", "-Xmx256M", "-XX:TieredStopAtLevel=1", "-XX:+UseParallelGC",
.cmd("java", "-ea", "-Xmx128M",
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
"--add-opens=java.base/java.nio=ALL-UNNAMED",
@@ -75,13 +75,12 @@ public class DhfsImage implements Future<String> {
"-Ddhfs.objects.deletion.delay=0",
"-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
"-Ddhfs.objects.ref_verification=true",
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.write_log=true",
"-Ddhfs.objects.sync.timeout=10",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.objects.transaction\".level=INFO",
"-Ddhfs.objects.periodic-push-op-interval=5s",
"-Ddhfs.fuse.root=/dhfs_test/fuse",
"-Ddhfs.objects.persistence.files.root=/dhfs_test/data",
@@ -1,7 +1,7 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfs.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import com.usatiuk.dhfs.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
@@ -18,7 +18,10 @@ import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Stream;

import static org.awaitility.Awaitility.await;
@@ -36,18 +39,12 @@ public class KillIT {
File data1;
File data2;

Network network;

ExecutorService executor;

@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();

data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();

network = Network.newNetwork();
Network network = Network.newNetwork();

container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -81,14 +78,14 @@ public class KillIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -99,28 +96,11 @@ public class KillIT {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}

private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);

return ls1.equals(ls2) && cat1.equals(cat2);
});
}

@Test
void killTest(TestInfo testInfo) throws Exception {
var executor = Executors.newFixedThreadPool(2);
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
@@ -144,11 +124,24 @@ public class KillIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency();
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);

return ls1.equals(ls2) && cat1.equals(cat2);
});
}

@Test
void killTestDirs(TestInfo testInfo) throws Exception {
var executor = Executors.newFixedThreadPool(2);
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
@@ -172,64 +165,18 @@ public class KillIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency();
}
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);

@Test
void killTest2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
return ls1.equals(ls2) && cat1.equals(cat2);
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency();
}

@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency();
}
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp.integration;
package com.usatiuk.dhfs.integration;

import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;
@@ -29,11 +29,9 @@ public class ResyncIT {
String c1uuid;
String c2uuid;

Network network;

@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
network = Network.newNetwork();
Network network = Network.newNetwork();

container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -57,7 +55,6 @@ public class ResyncIT {
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}

@Test
@@ -75,14 +72,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -115,14 +112,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -155,14 +152,14 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
@@ -1,215 +0,0 @@
package com.usatiuk.dhfsapp.integration;

import io.quarkus.logging.Log;

import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class LazyFs {
private static final String lazyFsPath;

static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}

private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;

try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();

fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}

Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}

private String fifoPath() {
return fifoFile.getAbsolutePath();
}

public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());

try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}

var argList = new ArrayList<String>();

argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}

startLatch = new CountDownLatch(1);

outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;

while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;

while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();

try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}

public void start() {
start("");
}

private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}

public void startTornOp() {
start("\n" +
"[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}

public void startTornSeq() {
start("[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}

public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}

public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}

// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}

@@ -1,489 +0,0 @@
package com.usatiuk.dhfsapp.integration;

import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;

import static org.awaitility.Awaitility.await;

public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;

WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;

String c1uuid;
String c2uuid;

File data1;
File data2;
File data1Lazy;
File data2Lazy;

LazyFs lazyFs1;
LazyFs lazyFs2;

ExecutorService executor;
Network network;

@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();

network = Network.newNetwork();

lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();

container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");

Stream.of(container1, container2).parallel().forEach(GenericContainer::start);

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));

c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();

Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);

var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);

var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);

waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}

@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();

Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);

executor.close();
network.close();
}

private void checkConsistency(String testName) {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");

return ls1.equals(ls2) && cat1.equals(cat2);
});
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}

executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();

waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();

waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason

Assumptions.assumeTrue(false);
}

executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();

waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);

checkConsistency(testInfo.getDisplayName());
}

@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs2.crash();
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs2.start();
|
||||
case TORN_OP -> lazyFs2.startTornOp();
|
||||
case TORN_SEQ -> lazyFs2.startTornSeq();
|
||||
}
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
var barrier2 = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier2.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier2.await();
|
||||
Log.info("Killing");
|
||||
Thread.sleep(3000);
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
lazyFs2.crash();
|
||||
}
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs2.start();
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource(CrashType.class)
|
||||
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
|
||||
var barrier = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
lazyFs2.crash();
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
|
||||
var client = DockerClientFactory.instance().client();
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
switch (crashType) {
|
||||
case CRASH -> lazyFs2.start();
|
||||
case TORN_OP -> lazyFs2.startTornOp();
|
||||
case TORN_SEQ -> lazyFs2.startTornSeq();
|
||||
}
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
Log.info("Failed to connect: " + testInfo.getDisplayName());
|
||||
// Sometimes it doesn't get mounted properly for some reason
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
|
||||
var barrier2 = new CountDownLatch(1);
|
||||
executor.submit(() -> {
|
||||
try {
|
||||
Log.info("Writing to container 1");
|
||||
barrier2.countDown();
|
||||
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
barrier2.await();
|
||||
Thread.sleep(3000);
|
||||
Log.info("Killing");
|
||||
if (crashType.equals(CrashType.CRASH)) {
|
||||
lazyFs2.crash();
|
||||
}
|
||||
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
|
||||
try {
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
|
||||
} catch (TimeoutException e) {
|
||||
// Sometimes crash doesn't work
|
||||
Log.info("Failed to crash: " + testInfo.getDisplayName());
|
||||
if (crashType.equals(CrashType.CRASH))
|
||||
throw e;
|
||||
Assumptions.assumeTrue(false);
|
||||
}
|
||||
client.killContainerCmd(container2.getContainerId()).exec();
|
||||
container2.stop();
|
||||
lazyFs2.stop();
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
|
||||
Log.info("Restart");
|
||||
lazyFs2.start();
|
||||
container2.start();
|
||||
|
||||
waitingConsumer2 = new WaitingConsumer();
|
||||
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
|
||||
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
|
||||
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
|
||||
|
||||
checkConsistency(testInfo.getDisplayName());
|
||||
}
|
||||
|
||||
|
||||
private static enum CrashType {
|
||||
CRASH,
|
||||
TORN_OP,
|
||||
TORN_SEQ
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
@@ -72,6 +72,26 @@
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@@ -102,11 +122,26 @@
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>

<build>

@@ -1,9 +1,9 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;

public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
@@ -1,9 +1,9 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.persistence.ChunkDataP;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import com.usatiuk.objects.JObjectKey;
import jakarta.inject.Singleton;

@Singleton
@@ -1,10 +1,10 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;

import java.util.Collection;
import java.util.Set;
@@ -28,10 +28,6 @@ public record File(JObjectKey key, long mode, long cTime, long mTime,
return new File(key, mode, cTime, mTime, symlink);
}

public File withCurrentMTime() {
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
}

@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();
@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;
@@ -1,7 +1,7 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.FileDtoP;
import com.usatiuk.utils.SerializationHelper;
import com.usatiuk.dhfs.utils.SerializationHelper;
import jakarta.inject.Singleton;

import java.io.IOException;
@@ -1,15 +1,20 @@
package com.usatiuk.dhfsfs.objects;
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.RemoteObjectDataWrapper;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.dhfs.repository.ObjSyncHandler;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.SyncHelper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -43,11 +48,11 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
DhfsFileService fileService;

private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}

private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC).orElseThrow();
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}

private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
@@ -30,7 +30,7 @@ public interface DhfsFileService {

long size(JObjectKey fileUuid);

ByteString read(JObjectKey fileUuid, long offset, int length);
Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);

Long write(JObjectKey fileUuid, long offset, ByteString data);

@@ -1,28 +1,27 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -74,11 +73,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {
JMapHelper jMapHelper;

private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}

private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
}

private ChunkData createChunk(ByteString bytes) {
@@ -95,21 +94,21 @@ public class DhfsFileServiceImpl implements DhfsFileService {
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}

private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}

private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
var ret = curTx.get(JKleppmannTreeNode.class, res);
return ret;
}

@@ -126,7 +125,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNodeHolder) {
} else if (ref instanceof JKleppmannTreeNode) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
@@ -141,7 +140,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
try {
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
@@ -190,7 +189,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.fileIno().equals(ino);
return f.getFileIno().equals(ino);
return false;
});
});
@@ -243,12 +242,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

if (dent instanceof JKleppmannTreeNodeHolder) {
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withCurrentMTime());
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
@@ -272,7 +271,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}

@Override
public ByteString read(JObjectKey fileUuid, long offset, int length) {
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
@@ -282,12 +281,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
return Optional.empty();
}

try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return ByteString.empty();
return Optional.of(ByteString.empty());

// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
@@ -325,10 +324,10 @@ public class DhfsFileServiceImpl implements DhfsFileService {
chunk = it.next();
}

return buf;
return Optional.of(buf);
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
return Optional.empty();
}
});
}
@@ -445,16 +444,16 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());
remoteTx.putData(file);

return (long) data.size();
});
@@ -536,16 +535,16 @@ public class DhfsFileServiceImpl implements DhfsFileService {
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());

for (var e : removedChunks.entrySet()) {
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}

for (var e : newChunks.entrySet()) {
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}

remoteTx.putData(file.withCurrentMTime());
remoteTx.putData(file);
return true;
});
}
@@ -596,7 +595,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
});
}

@@ -629,7 +628,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
if (dent instanceof JKleppmannTreeNode) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;

public class DirectoryNotEmptyException extends RuntimeException {
@Override
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;

public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.service;
package com.usatiuk.dhfs.files.service;

public enum GetattrType {
FILE,
@@ -1,18 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}

@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}
@@ -1,19 +0,0 @@
package com.usatiuk.dhfsfs.objects;

import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;

import java.util.Collection;
import java.util.List;

public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}

@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsapp;
package com.usatiuk.dhfs;

import io.quarkus.test.junit.QuarkusTestProfile;

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs.benchmarks;
package com.usatiuk.dhfs.benchmarks;

import io.quarkus.logging.Log;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
@@ -1,8 +1,8 @@
package com.usatiuk.dhfsfs.benchmarks;
package com.usatiuk.dhfs.benchmarks;

import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfsfs.TempDataProfile;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.objects.JObjectKey;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@@ -1,8 +1,9 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;

import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
@@ -89,7 +90,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
// for (int start = 0; start < all.length(); start++) {
// for (int end = start; end <= all.length(); end++) {
// var read = fileService.read(fuuid.toString(), start, end - start);
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.toByteArray());
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
// }
// }
// }
@@ -110,21 +111,17 @@ public abstract class DhfsFileServiceSimpleTestImpl {

var uuid = ret.get();

var curMtime = fileService.getattr(uuid).get().mtime();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
fileService.write(uuid, 4, new byte[]{10, 11, 12});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 10, new byte[]{13, 14});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 6, new byte[]{15, 16});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
fileService.write(uuid, 3, new byte[]{17, 18});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());

var newMtime = fileService.getattr(uuid).get().mtime();
Assertions.assertTrue(newMtime > curMtime);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());

fileService.unlink("/writeTest");
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
@@ -138,7 +135,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

fileService.unlink("/removeTest");
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
@@ -152,12 +149,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
}

@RepeatedTest(100)
@@ -169,12 +166,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
} finally {
fileService.unlink("/truncateTest2");
}
@@ -188,10 +185,10 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

fileService.truncate(uuid, 7);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray());
}

@Test
@@ -201,14 +198,14 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
Assertions.assertTrue(fileService.open("/movedTest").isPresent());

Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
}

@Test
@@ -221,9 +218,9 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid2 = ret2.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());


jObjectTxManager.run(() -> {
@@ -237,7 +234,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());

Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());

// await().atMost(5, TimeUnit.SECONDS).until(() -> {
// jObjectTxManager.run(() -> {
@@ -255,8 +252,8 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
}

@Test
@@ -266,13 +263,13 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
Assertions.assertArrayEquals(new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
}, fileService.read(uuid, 0, 30).toByteArray());
}, fileService.read(uuid, 0, 30).get().toByteArray());
}

@Test
@@ -282,7 +279,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();

fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());

// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
@@ -297,6 +294,6 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());

Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest2").get(), 0, 10).toByteArray());
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
}
}
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfs;
package com.usatiuk.dhfs.files;

import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
@@ -73,9 +73,24 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.serceman</groupId>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>0.5.8</version>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
@@ -107,11 +122,26 @@
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>

@@ -1,10 +1,10 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;

import com.google.protobuf.UnsafeByteOperations;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
import com.usatiuk.dhfsfs.service.GetattrRes;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import io.grpc.Status;
@@ -40,8 +40,6 @@ import static jnr.posix.FileStat.*;
public class DhfsFuse extends FuseStubFS {
private static final int blksize = 1048576;
private static final int iosize = 1048576;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
@ConfigProperty(name = "dhfs.fuse.root")
String root;
@ConfigProperty(name = "dhfs.fuse.enabled")
@@ -55,6 +53,9 @@ public class DhfsFuse extends FuseStubFS {
@Inject
DhfsFileService fileService;

private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);

private long allocateHandle(JObjectKey key) {
while (true) {
var newFh = _fh.getAndIncrement();
@@ -72,48 +73,40 @@ public class DhfsFuse extends FuseStubFS {

void init(@Observes @Priority(100000) StartupEvent event) {
if (!enabled) return;
Paths.get(root).toFile().mkdirs();

if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);

Log.info("Mounting with root " + root);

var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();

var opts = new ArrayList<String>();

if (SystemUtils.IS_OS_WINDOWS) {
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=-1");
opts.add("-o");
opts.add("gid=-1");
} else {
Paths.get(root).toFile().mkdirs();

if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);

var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();

// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
// opts.add("-o");
// opts.add("large_read");
opts.add("-o");
opts.add("big_writes");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("big_writes");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("gid=" + gid);
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("-o");
opts.add("gid=" + gid);

mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
}

@@ -231,8 +224,8 @@ public class DhfsFuse extends FuseStubFS {
var fileKey = getFromHandle(fi.fh.get());
var read = fileService.read(fileKey, offset, (int) size);
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
return read.size();
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
return read.get().size();
} catch (Throwable e) {
Log.error("When reading " + path, e);
return -ErrorCodes.EIO();
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;

import com.google.protobuf.ByteOutput;
import jnr.ffi.Pointer;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;

import jakarta.inject.Singleton;
import jdk.internal.access.JavaNioAccess;
@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs;

import io.quarkus.test.junit.QuarkusTestProfile;

@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
@@ -1,5 +1,6 @@
package com.usatiuk.dhfsfuse;
package com.usatiuk.dhfs.fuse;

import com.usatiuk.dhfs.TempDataProfile;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import org.eclipse.microprofile.config.inject.ConfigProperty;
@@ -1,5 +1,7 @@
package com.usatiuk.kleppmanntree;

import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
@@ -15,6 +17,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;

public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
@@ -86,6 +89,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}

@@ -213,16 +217,31 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}

if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
@@ -245,7 +264,8 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
computed = new LogRecord<>(op, null);
}

if (computed.effects() != null)
@@ -254,6 +274,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}

private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}

@@ -334,6 +372,10 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();

if (Objects.equals(conflictNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}

LOGGER.finer(() -> "Node creation conflict: " + conflictNode);

String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
@@ -358,14 +400,18 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
throw new RuntimeException("Class mismatch for meta for node " + node.key());
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
}

var replaceNodeId = newParent.children().get(op.newName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();

if (Objects.equals(replaceNodeMeta, op.newMeta())) {
return new LogRecord<>(op, null);
}

LOGGER.finer(() -> "Node replacement: " + replaceNode);

return new LogRecord<>(op, List.of(

@@ -10,14 +10,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
return oldInfo.oldMeta().name();
return oldInfo.oldMeta().getName();
}
return childId.toString();
}

public String newName() {
if (newMeta != null) {
return newMeta.name();
return newMeta.getName();
}
return childId.toString();
}

@@ -3,7 +3,7 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;

public interface NodeMeta extends Serializable {
String name();
String getName();

NodeMeta withName(String name);
}
|
||||
@@ -7,7 +7,7 @@ public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends
|
||||
NodeIdT childId) implements Serializable {
|
||||
public String newName() {
|
||||
if (newMeta != null)
|
||||
return newMeta.name();
|
||||
return newMeta.getName();
|
||||
return childId.toString();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ public interface TreeNode<TimestampT extends Comparable<TimestampT>, PeerIdT ext
|
||||
|
||||
default String name() {
|
||||
var meta = meta();
|
||||
if (meta != null) return meta.name();
|
||||
if (meta != null) return meta.getName();
|
||||
return key().toString();
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ public abstract class TestNodeMeta implements NodeMeta {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
public String getName() {
|
||||
return _name;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
package com.usatiuk.objects;
|
||||
|
||||
import com.usatiuk.objects.iterators.Data;
|
||||
|
||||
public sealed interface JDataVersionedWrapper extends Data<JDataVersionedWrapper> permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
|
||||
@Override
|
||||
default JDataVersionedWrapper value() {
|
||||
return this;
|
||||
}
|
||||
|
||||
public sealed interface JDataVersionedWrapper permits JDataVersionedWrapperLazy, JDataVersionedWrapperImpl {
|
||||
JData data();
|
||||
|
||||
long version();
|
||||
|
||||
@@ -3,9 +3,9 @@ package com.usatiuk.objects;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
public final class JDataVersionedWrapperLazy implements JDataVersionedWrapper {
|
||||
private JData _data;
|
||||
private final long _version;
|
||||
private final int _estimatedSize;
|
||||
private JData _data;
|
||||
private Supplier<JData> _producer;
|
||||
|
||||
public JDataVersionedWrapperLazy(long version, int estimatedSize, Supplier<JData> producer) {
|
||||
|
||||
@@ -24,8 +24,6 @@ public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVe
|
||||
public JDataVersionedWrapper deserialize(ByteString data) {
|
||||
var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
|
||||
var rawData = data.substring(Long.BYTES);
|
||||
return new JDataVersionedWrapperLazy(version, rawData.size(),
|
||||
() -> dataSerializer.deserialize(rawData)
|
||||
);
|
||||
return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package com.usatiuk.objects;
|
||||
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.utils.SerializationHelper;
|
||||
import com.usatiuk.dhfs.utils.SerializationHelper;
|
||||
import io.quarkus.arc.DefaultBean;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import com.usatiuk.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.Iterator;
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
public interface Data<V> extends MaybeTombstone<V> {
|
||||
V value();
|
||||
import java.util.Optional;
|
||||
|
||||
public record Data<V>(V value) implements MaybeTombstone<V> {
|
||||
@Override
|
||||
public Optional<V> opt() {
|
||||
return Optional.of(value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
public record DataWrapper<V>(V value) implements Data<V> {
|
||||
}
|
||||
@@ -1,6 +1,12 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@FunctionalInterface
|
||||
public interface IterProdFn<K extends Comparable<K>, V> {
|
||||
CloseableKvIterator<K, V> get(IteratorStart start, K key);
|
||||
|
||||
default Stream<CloseableKvIterator<K, MaybeTombstone<V>>> getFlat(IteratorStart start, K key) {
|
||||
return Stream.of(new MappingKvIterator<>(get(start, key), Data::new));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,20 +39,20 @@ public class KeyPredicateKvIterator<K extends Comparable<K>, V> extends Reversib
|
||||
}
|
||||
|
||||
|
||||
// switch (start) {
|
||||
// case LT -> {
|
||||
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
|
||||
// }
|
||||
// case LE -> {
|
||||
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
|
||||
// }
|
||||
// case GT -> {
|
||||
// assert _next == null || _next.compareTo(startKey) > 0;
|
||||
// }
|
||||
// case GE -> {
|
||||
// assert _next == null || _next.compareTo(startKey) >= 0;
|
||||
// }
|
||||
// }
|
||||
switch (start) {
|
||||
case LT -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
|
||||
}
|
||||
case LE -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
|
||||
}
|
||||
case GT -> {
|
||||
assert _next == null || _next.compareTo(startKey) > 0;
|
||||
}
|
||||
case GE -> {
|
||||
assert _next == null || _next.compareTo(startKey) >= 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void fillNext() {
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import java.util.Optional;
|
||||
|
||||
public interface MaybeTombstone<T> {
|
||||
Optional<T> opt();
|
||||
}
|
||||
|
||||
@@ -1,29 +1,37 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import org.apache.commons.lang3.mutable.MutableInt;
|
||||
import org.apache.commons.lang3.mutable.MutableObject;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.NavigableMap;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.TreeMap;
|
||||
import java.util.*;
|
||||
|
||||
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, MaybeTombstone<V>> {
|
||||
private record IteratorEntry<K extends Comparable<K>, V>(int priority,
|
||||
CloseableKvIterator<K, MaybeTombstone<V>> iterator) {
|
||||
public IteratorEntry<K, V> reversed() {
|
||||
return new IteratorEntry<>(priority, iterator.reversed());
|
||||
}
|
||||
}
|
||||
|
||||
public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvIterator<K, V> {
|
||||
private final NavigableMap<K, IteratorEntry<K, V>> _sortedIterators = new TreeMap<>();
|
||||
private final String _name;
|
||||
private final List<IteratorEntry<K, V>> _iterators;
|
||||
|
||||
public MergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
|
||||
_goingForward = true;
|
||||
_name = name;
|
||||
|
||||
// Why streams are so slow?
|
||||
{
|
||||
IteratorEntry<K, V>[] iteratorEntries = new IteratorEntry[iterators.size()];
|
||||
for (int i = 0; i < iterators.size(); i++) {
|
||||
iteratorEntries[i] = new IteratorEntry<>(i, iterators.get(i).get(startType, startKey));
|
||||
}
|
||||
_iterators = List.of(iteratorEntries);
|
||||
var iteratorsTmp = iterators.stream().flatMap(i -> i.getFlat(startType, startKey));
|
||||
MutableInt i = new MutableInt(0);
|
||||
ArrayList<IteratorEntry<K, V>> tmp = new ArrayList<>(16);
|
||||
iteratorsTmp.forEach(i2 -> {
|
||||
tmp.add(new IteratorEntry<>(i.getAndIncrement(), i2));
|
||||
});
|
||||
_iterators = List.copyOf(tmp);
|
||||
}
|
||||
|
||||
if (startType == IteratorStart.LT || startType == IteratorStart.LE) {
|
||||
@@ -73,21 +81,21 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
advanceIterator(iterator);
|
||||
}
|
||||
|
||||
// Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
|
||||
// switch (startType) {
|
||||
//// case LT -> {
|
||||
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
|
||||
//// }
|
||||
//// case LE -> {
|
||||
//// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
|
||||
//// }
|
||||
// case GT -> {
|
||||
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
|
||||
Log.tracev("{0} Initialized: {1}", _name, _sortedIterators);
|
||||
switch (startType) {
|
||||
// case LT -> {
|
||||
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) < 0;
|
||||
// }
|
||||
// case GE -> {
|
||||
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
|
||||
// case LE -> {
|
||||
// assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(initialStartKey) <= 0;
|
||||
// }
|
||||
// }
|
||||
case GT -> {
|
||||
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) > 0;
|
||||
}
|
||||
case GE -> {
|
||||
assert _sortedIterators.isEmpty() || _sortedIterators.firstKey().compareTo(startKey) >= 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@SafeVarargs
|
||||
@@ -98,7 +106,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
private void advanceIterator(IteratorEntry<K, V> iteratorEntry) {
|
||||
while (iteratorEntry.iterator().hasNext()) {
|
||||
K key = iteratorEntry.iterator().peekNextKey();
|
||||
// Log.tracev("{0} Advance peeked: {1}-{2}", _name, iteratorEntry, key);
|
||||
Log.tracev("{0} Advance peeked: {1}-{2}", _name, iteratorEntry, key);
|
||||
|
||||
MutableObject<IteratorEntry<K, V>> mutableBoolean = new MutableObject<>(null);
|
||||
|
||||
@@ -118,7 +126,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
});
|
||||
|
||||
if (newVal != iteratorEntry) {
|
||||
// Log.tracev("{0} Skipped: {1}", _name, iteratorEntry.iterator().peekNextKey());
|
||||
Log.tracev("{0} Skipped: {1}", _name, iteratorEntry.iterator().peekNextKey());
|
||||
iteratorEntry.iterator().skip();
|
||||
continue;
|
||||
}
|
||||
@@ -134,7 +142,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
@Override
|
||||
protected void reverse() {
|
||||
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
|
||||
// Log.tracev("{0} Reversing from {1}", _name, cur);
|
||||
Log.tracev("{0} Reversing from {1}", _name, cur);
|
||||
_goingForward = !_goingForward;
|
||||
_sortedIterators.clear();
|
||||
for (IteratorEntry<K, V> iterator : _iterators) {
|
||||
@@ -169,7 +177,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
}
|
||||
cur.getValue().iterator().skip();
|
||||
advanceIterator(cur.getValue());
|
||||
// Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
|
||||
Log.tracev("{0} Skip: {1}, next: {2}", _name, cur, _sortedIterators);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -178,7 +186,7 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Pair<K, V> nextImpl() {
|
||||
protected Pair<K, MaybeTombstone<V>> nextImpl() {
|
||||
var cur = _goingForward ? _sortedIterators.pollFirstEntry() : _sortedIterators.pollLastEntry();
|
||||
if (cur == null) {
|
||||
throw new NoSuchElementException();
|
||||
@@ -208,12 +216,6 @@ public class MergingKvIterator<K extends Comparable<K>, V> extends ReversibleKvI
|
||||
private interface FirstMatchState<K extends Comparable<K>, V> {
|
||||
}
|
||||
|
||||
private record IteratorEntry<K extends Comparable<K>, V>(int priority, CloseableKvIterator<K, V> iterator) {
|
||||
public IteratorEntry<K, V> reversed() {
|
||||
return new IteratorEntry<>(priority, iterator.reversed());
|
||||
}
|
||||
}
|
||||
|
||||
private record FirstMatchNone<K extends Comparable<K>, V>() implements FirstMatchState<K, V> {
|
||||
}
|
||||
|
||||
|
||||
@@ -9,22 +9,22 @@ public class NavigableMapKvIterator<K extends Comparable<K>, V> extends Reversib
|
||||
private Iterator<Map.Entry<K, V>> _iterator;
|
||||
private Map.Entry<K, V> _next;
|
||||
|
||||
public NavigableMapKvIterator(NavigableMap<K, ? extends V> map, IteratorStart start, K key) {
|
||||
_map = (NavigableMap<K, V>) map;
|
||||
public NavigableMapKvIterator(NavigableMap<K, V> map, IteratorStart start, K key) {
|
||||
_map = map;
|
||||
SortedMap<K, V> _view;
|
||||
_goingForward = true;
|
||||
switch (start) {
|
||||
case GE -> _view = _map.tailMap(key, true);
|
||||
case GT -> _view = _map.tailMap(key, false);
|
||||
case GE -> _view = map.tailMap(key, true);
|
||||
case GT -> _view = map.tailMap(key, false);
|
||||
case LE -> {
|
||||
var floorKey = _map.floorKey(key);
|
||||
var floorKey = map.floorKey(key);
|
||||
if (floorKey == null) _view = _map;
|
||||
else _view = _map.tailMap(floorKey, true);
|
||||
else _view = map.tailMap(floorKey, true);
|
||||
}
|
||||
case LT -> {
|
||||
var lowerKey = map.lowerKey(key);
|
||||
if (lowerKey == null) _view = _map;
|
||||
else _view = _map.tailMap(lowerKey, true);
|
||||
else _view = map.tailMap(lowerKey, true);
|
||||
}
|
||||
default -> throw new IllegalArgumentException("Unknown start type");
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.NoSuchElementException;
|
||||
@@ -43,20 +44,20 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
|
||||
}
|
||||
|
||||
|
||||
// switch (start) {
|
||||
// case LT -> {
|
||||
//// assert _next == null || _next.getKey().compareTo(startKey) < 0;
|
||||
// }
|
||||
// case LE -> {
|
||||
//// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
|
||||
// }
|
||||
// case GT -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) > 0;
|
||||
// }
|
||||
// case GE -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) >= 0;
|
||||
// }
|
||||
// }
|
||||
switch (start) {
|
||||
case LT -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) < 0;
|
||||
}
|
||||
case LE -> {
|
||||
// assert _next == null || _next.getKey().compareTo(startKey) <= 0;
|
||||
}
|
||||
case GT -> {
|
||||
assert _next == null || _next.getKey().compareTo(startKey) > 0;
|
||||
}
|
||||
case GE -> {
|
||||
assert _next == null || _next.getKey().compareTo(startKey) >= 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void fillNext() {
|
||||
@@ -80,8 +81,8 @@ public class PredicateKvIterator<K extends Comparable<K>, V, V_T> extends Revers
|
||||
else if (!_goingForward && !wasAtEnd)
|
||||
_backing.skipPrev();
|
||||
|
||||
// if (!wasAtEnd)
|
||||
// Log.tracev("Skipped in reverse: {0}", _next);
|
||||
if (!wasAtEnd)
|
||||
Log.tracev("Skipped in reverse: {0}", _next);
|
||||
|
||||
_next = null;
|
||||
_checkedNext = false;
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
public interface Tombstone<V> extends MaybeTombstone<V> {
|
||||
import java.util.Optional;
|
||||
|
||||
public record Tombstone<V>() implements MaybeTombstone<V> {
|
||||
@Override
|
||||
public Optional<V> opt() {
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
public record TombstoneImpl<V>() implements Tombstone<V> {
|
||||
}
|
||||
@@ -1,22 +1,83 @@
|
||||
package com.usatiuk.objects.iterators;
|
||||
|
||||
import io.quarkus.logging.Log;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public abstract class TombstoneMergingKvIterator {
|
||||
public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
|
||||
return new PredicateKvIterator<K, MaybeTombstone<V>, V>(
|
||||
new MergingKvIterator<K, MaybeTombstone<V>>(name + "-merging", startType, startKey, iterators),
|
||||
public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
|
||||
private final CloseableKvIterator<K, V> _backing;
|
||||
private final String _name;
|
||||
|
||||
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, V>> iterators) {
|
||||
_name = name;
|
||||
_backing = new PredicateKvIterator<>(
|
||||
new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
|
||||
startType, startKey,
|
||||
pair -> {
|
||||
// Log.tracev("{0} - Processing pair {1}", name, pair);
|
||||
if (pair instanceof Tombstone<V>) {
|
||||
Log.tracev("{0} - Processing pair {1}", _name, pair);
|
||||
if (pair instanceof Tombstone) {
|
||||
return null;
|
||||
}
|
||||
return ((Data<V>) pair).value();
|
||||
});
|
||||
}
|
||||
|
||||
public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
|
||||
return of(name, startType, startKey, List.of(iterators));
|
||||
@SafeVarargs
|
||||
public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, V>... iterators) {
|
||||
this(name, startType, startKey, List.of(iterators));
|
||||
}
|
||||
|
||||
@Override
|
||||
public K peekNextKey() {
|
||||
return _backing.peekNextKey();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void skip() {
|
||||
_backing.skip();
|
||||
}
|
||||
|
||||
@Override
|
||||
public K peekPrevKey() {
|
||||
return _backing.peekPrevKey();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<K, V> prev() {
|
||||
return _backing.prev();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasPrev() {
|
||||
return _backing.hasPrev();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void skipPrev() {
|
||||
_backing.skipPrev();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
_backing.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return _backing.hasNext();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<K, V> next() {
|
||||
return _backing.next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TombstoneMergingKvIterator{" +
|
||||
"_backing=" + _backing +
|
||||
", _name='" + _name + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,19 @@
|
||||
package com.usatiuk.objects.snapshot;
|
||||
|
||||
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IterProdFn;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.utils.AutoCloseableNoThrow;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Optional;
|
||||
|
||||
public interface Snapshot<K extends Comparable<K>, V> extends AutoCloseableNoThrow {
|
||||
CloseableKvIterator<K, V> getIterator(IteratorStart start, K key);
|
||||
IterProdFn<K, V> getIterator();
|
||||
|
||||
default CloseableKvIterator<K, V> getIterator(IteratorStart start, K key) {
|
||||
return getIterator().get(start, key);
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
Optional<V> readObject(K name);
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
package com.usatiuk.objects.snapshot;
|
||||
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
|
||||
import com.usatiuk.objects.transaction.TxRecord;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.inject.Inject;
|
||||
import jakarta.inject.Singleton;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Collection;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
@Singleton
|
||||
public class SnapshotManager {
|
||||
@Inject
|
||||
WritebackObjectPersistentStore writebackStore;
|
||||
|
||||
public Snapshot<JObjectKey, JDataVersionedWrapper> createSnapshot() {
|
||||
return writebackStore.getSnapshot();
|
||||
}
|
||||
|
||||
// This should not be called for the same objects concurrently
|
||||
public Consumer<Runnable> commitTx(Collection<TxRecord.TxObjectRecord<?>> writes) {
|
||||
return writebackStore.commitTx(writes);
|
||||
}
|
||||
}
|
||||
@@ -21,18 +21,55 @@ import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@ApplicationScoped
|
||||
public class CachingObjectPersistentStore {
|
||||
private final AtomicReference<Cache> _cache;
|
||||
@Inject
|
||||
SerializingObjectPersistentStore delegate;
|
||||
@ConfigProperty(name = "dhfs.objects.lru.print-stats")
|
||||
boolean printStats;
|
||||
|
||||
private record Cache(TreePMap<JObjectKey, CacheEntry> map,
|
||||
int size,
|
||||
long version,
|
||||
int sizeLimit) {
|
||||
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
|
||||
int objSize = obj.map(JDataVersionedWrapper::estimateSize).orElse(16);
|
||||
|
||||
int newSize = size() + objSize;
|
||||
var entry = new CacheEntry(obj.<MaybeTombstone<JDataVersionedWrapper>>map(Data::new).orElse(new Tombstone<>()), objSize);
|
||||
|
||||
var old = map.get(key);
|
||||
if (old != null)
|
||||
newSize -= old.size();
|
||||
|
||||
TreePMap<JObjectKey, CacheEntry> newCache = map().plus(key, entry);
|
||||
|
||||
while (newSize > sizeLimit) {
|
||||
var del = newCache.firstEntry();
|
||||
newCache = newCache.minusFirstEntry();
|
||||
newSize -= del.getValue().size();
|
||||
}
|
||||
return new Cache(
|
||||
newCache,
|
||||
newSize,
|
||||
version,
|
||||
sizeLimit
|
||||
);
|
||||
}
|
||||
|
||||
public Cache withVersion(long version) {
|
||||
return new Cache(map, size, version, sizeLimit);
|
||||
}
|
||||
}
|
||||
|
||||
private final AtomicReference<Cache> _cache;
|
||||
private ExecutorService _commitExecutor;
|
||||
private ExecutorService _statusExecutor;
|
||||
private AtomicLong _cached = new AtomicLong();
|
||||
private AtomicLong _cacheTries = new AtomicLong();
|
||||
|
||||
public CachingObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.lru.limit") int sizeLimit) {
|
||||
_cache = new AtomicReference<>(
|
||||
new Cache(TreePMap.empty(), 0, -1, sizeLimit)
|
||||
@@ -105,10 +142,10 @@ public class CachingObjectPersistentStore {
|
||||
Snapshot<JObjectKey, JDataVersionedWrapper> finalBacking = backing;
|
||||
Cache finalCurCache = curCache;
|
||||
return new Snapshot<JObjectKey, JDataVersionedWrapper>() {
|
||||
private final Cache _curCache = finalCurCache;
|
||||
private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
|
||||
private boolean _invalid = false;
|
||||
private boolean _closed = false;
|
||||
private final Cache _curCache = finalCurCache;
|
||||
private final Snapshot<JObjectKey, JDataVersionedWrapper> _backing = finalBacking;
|
||||
|
||||
private void doCache(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
|
||||
_cacheTries.incrementAndGet();
|
||||
@@ -150,10 +187,43 @@ public class CachingObjectPersistentStore {
|
||||
}
|
||||
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
|
||||
return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("cache", start, key,
|
||||
(mS, mK) -> new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), mS, mK),
|
||||
(mS, mK) -> new CachingKvIterator(_backing.getIterator(start, key)));
|
||||
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
|
||||
IterProdFn<JObjectKey, JDataVersionedWrapper> cacheItProdFn = new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
|
||||
throw new UnsupportedOperationException("Not implemented");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
|
||||
return Stream.of(
|
||||
new MappingKvIterator<>(
|
||||
new NavigableMapKvIterator<>(_curCache.map(), start, key),
|
||||
e -> {
|
||||
// Log.tracev("Taken from cache: {0}", e);
|
||||
return e.object();
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
IterProdFn<JObjectKey, JDataVersionedWrapper> backingItProdFn = (mS, mK) -> new CachingKvIterator(_backing.getIterator(mS, mK));
|
||||
|
||||
return new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
|
||||
return new TombstoneMergingKvIterator<>("cache", start, key, cacheItProdFn, backingItProdFn);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
|
||||
return Stream.concat(
|
||||
cacheItProdFn.getFlat(start, key),
|
||||
backingItProdFn.getFlat(start, key)
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
@@ -161,12 +231,12 @@ public class CachingObjectPersistentStore {
|
||||
public Optional<JDataVersionedWrapper> readObject(JObjectKey name) {
|
||||
var cached = _curCache.map().get(name);
|
||||
if (cached != null) {
|
||||
return switch (cached) {
|
||||
case CacheEntryPresent data -> Optional.of(data.value());
|
||||
case CacheEntryMiss tombstone -> {
|
||||
return switch (cached.object()) {
|
||||
case Data<JDataVersionedWrapper> data -> Optional.of(data.value());
|
||||
case Tombstone<JDataVersionedWrapper> tombstone -> {
|
||||
yield Optional.empty();
|
||||
}
|
||||
default -> throw new IllegalStateException("Unexpected value: " + cached);
|
||||
default -> throw new IllegalStateException("Unexpected value: " + cached.object());
|
||||
};
|
||||
}
|
||||
var read = _backing.readObject(name);
|
||||
@@ -185,7 +255,7 @@ public class CachingObjectPersistentStore {
|
||||
_backing.close();
|
||||
}
|
||||
|
||||
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> {
|
||||
private class CachingKvIterator implements CloseableKvIterator<JObjectKey, JDataVersionedWrapper> {
|
||||
private final CloseableKvIterator<JObjectKey, JDataVersionedWrapper> _delegate;
|
||||
|
||||
private CachingKvIterator(CloseableKvIterator<JObjectKey, JDataVersionedWrapper> delegate) {
|
||||
@@ -218,10 +288,10 @@ public class CachingObjectPersistentStore {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> prev() {
|
||||
public Pair<JObjectKey, JDataVersionedWrapper> prev() {
|
||||
var prev = _delegate.prev();
|
||||
maybeCache(prev.getKey(), Optional.of(prev.getValue()));
|
||||
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) prev;
|
||||
return prev;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -235,10 +305,10 @@ public class CachingObjectPersistentStore {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>> next() {
|
||||
public Pair<JObjectKey, JDataVersionedWrapper> next() {
|
||||
var next = _delegate.next();
|
||||
maybeCache(next.getKey(), Optional.of(next.getValue()));
|
||||
return (Pair<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (Pair<JObjectKey, ?>) next;
|
||||
return next;
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -251,53 +321,6 @@ public class CachingObjectPersistentStore {
|
||||
}
|
||||
}
|
||||
|
||||
private interface CacheEntry extends MaybeTombstone<JDataVersionedWrapper> {
|
||||
int size();
|
||||
}
|
||||
|
||||
private record Cache(TreePMap<JObjectKey, CacheEntry> map,
|
||||
int size,
|
||||
long version,
|
||||
int sizeLimit) {
|
||||
public Cache withPut(JObjectKey key, Optional<JDataVersionedWrapper> obj) {
|
||||
var entry = obj.<CacheEntry>map(o -> new CacheEntryPresent(o, o.estimateSize())).orElse(new CacheEntryMiss());
|
||||
|
||||
int newSize = size() + entry.size();
|
||||
|
||||
var old = map.get(key);
|
||||
if (old != null)
|
||||
newSize -= old.size();
|
||||
|
||||
TreePMap<JObjectKey, CacheEntry> newCache = map();
|
||||
|
||||
while (newSize > sizeLimit) {
|
||||
var del = newCache.firstEntry();
|
||||
newCache = newCache.minusFirstEntry();
|
||||
newSize -= del.getValue().size();
|
||||
}
|
||||
|
||||
newCache = newCache.plus(key, entry);
|
||||
return new Cache(
|
||||
newCache,
|
||||
newSize,
|
||||
version,
|
||||
sizeLimit
|
||||
);
|
||||
}
|
||||
|
||||
public Cache withVersion(long version) {
|
||||
return new Cache(map, size, version, sizeLimit);
|
||||
}
|
||||
}
|
||||
|
||||
private record CacheEntryPresent(JDataVersionedWrapper value,
|
||||
int size) implements CacheEntry, Data<JDataVersionedWrapper> {
|
||||
}
|
||||
|
||||
private record CacheEntryMiss() implements CacheEntry, Tombstone<JDataVersionedWrapper> {
|
||||
@Override
|
||||
public int size() {
|
||||
return 64;
|
||||
}
|
||||
private record CacheEntry(MaybeTombstone<JDataVersionedWrapper> object, int size) {
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,15 +2,15 @@ package com.usatiuk.objects.stores;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.dhfs.utils.RefcountedCloseable;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.JObjectKeyMax;
|
||||
import com.usatiuk.objects.JObjectKeyMin;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IterProdFn;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.objects.iterators.KeyPredicateKvIterator;
|
||||
import com.usatiuk.objects.iterators.ReversibleKvIterator;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
import com.usatiuk.utils.RefcountedCloseable;
|
||||
import io.quarkus.arc.properties.IfBuildProperty;
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.ShutdownEvent;
|
||||
@@ -42,9 +42,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
private static final String DB_VER_OBJ_NAME_STR = "__DB_VER_OBJ";
|
||||
private static final ByteBuffer DB_VER_OBJ_NAME;
|
||||
|
||||
@ConfigProperty(name = "dhfs.objects.persistence.lmdb.size", defaultValue = "1000000000000")
|
||||
long lmdbSize;
|
||||
|
||||
static {
|
||||
byte[] tmp = DB_VER_OBJ_NAME_STR.getBytes(StandardCharsets.ISO_8859_1);
|
||||
var bb = ByteBuffer.allocateDirect(tmp.length);
|
||||
@@ -68,7 +65,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
_root.toFile().mkdirs();
|
||||
}
|
||||
_env = create()
|
||||
.setMapSize(lmdbSize)
|
||||
.setMapSize(1_000_000_000_000L)
|
||||
.setMaxDbs(1)
|
||||
.open(_root.toFile(), EnvFlags.MDB_NOTLS);
|
||||
_db = _env.openDbi(DB_NAME, MDB_CREATE);
|
||||
@@ -124,9 +121,9 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
private boolean _closed = false;
|
||||
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
|
||||
public IterProdFn<JObjectKey, ByteString> getIterator() {
|
||||
assert !_closed;
|
||||
return new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn.ref(), start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR));
|
||||
return (start, key) -> new KeyPredicateKvIterator<>(new LmdbKvIterator(_txn.ref(), start, key), start, key, (k) -> !k.value().equals(DB_VER_OBJ_NAME_STR));
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
@@ -211,7 +208,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
// private final Exception _allocationStacktrace = new Exception();
|
||||
private final Exception _allocationStacktrace = null;
|
||||
private boolean _hasNext = false;
|
||||
private JObjectKey _peekedNextKey = null;
|
||||
|
||||
LmdbKvIterator(RefcountedCloseable<Txn<ByteBuffer>> txn, IteratorStart start, JObjectKey key) {
|
||||
_txn = txn;
|
||||
@@ -281,24 +277,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
}
|
||||
}
|
||||
|
||||
// var realGot = JObjectKey.fromByteBuffer(_cursor.key());
|
||||
// _cursor.key().flip();
|
||||
//
|
||||
// switch (start) {
|
||||
// case LT -> {
|
||||
//// assert !_hasNext || realGot.compareTo(key) < 0;
|
||||
// }
|
||||
// case LE -> {
|
||||
//// assert !_hasNext || realGot.compareTo(key) <= 0;
|
||||
// }
|
||||
// case GT -> {
|
||||
// assert !_hasNext || realGot.compareTo(key) > 0;
|
||||
// }
|
||||
// case GE -> {
|
||||
// assert !_hasNext || realGot.compareTo(key) >= 0;
|
||||
// }
|
||||
// }
|
||||
// Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
|
||||
var realGot = JObjectKey.fromByteBuffer(_cursor.key());
|
||||
_cursor.key().flip();
|
||||
|
||||
switch (start) {
|
||||
case LT -> {
|
||||
// assert !_hasNext || realGot.compareTo(key) < 0;
|
||||
}
|
||||
case LE -> {
|
||||
// assert !_hasNext || realGot.compareTo(key) <= 0;
|
||||
}
|
||||
case GT -> {
|
||||
assert !_hasNext || realGot.compareTo(key) > 0;
|
||||
}
|
||||
case GE -> {
|
||||
assert !_hasNext || realGot.compareTo(key) >= 0;
|
||||
}
|
||||
}
|
||||
Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -327,7 +323,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
}
|
||||
}
|
||||
_goingForward = !_goingForward;
|
||||
_peekedNextKey = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -335,12 +330,8 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
if (!_hasNext) {
|
||||
throw new NoSuchElementException("No more elements");
|
||||
}
|
||||
if (_peekedNextKey != null) {
|
||||
return _peekedNextKey;
|
||||
}
|
||||
var ret = JObjectKey.fromByteBuffer(_cursor.key());
|
||||
_cursor.key().flip();
|
||||
_peekedNextKey = ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -350,7 +341,6 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
_hasNext = _cursor.next();
|
||||
else
|
||||
_hasNext = _cursor.prev();
|
||||
_peekedNextKey = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -371,8 +361,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
|
||||
_hasNext = _cursor.next();
|
||||
else
|
||||
_hasNext = _cursor.prev();
|
||||
// Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
|
||||
_peekedNextKey = null;
|
||||
Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,8 +2,7 @@ package com.usatiuk.objects.stores;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.objects.iterators.IterProdFn;
|
||||
import com.usatiuk.objects.iterators.NavigableMapKvIterator;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
import io.quarkus.arc.properties.IfBuildProperty;
|
||||
@@ -37,8 +36,8 @@ public class MemoryObjectPersistentStore implements ObjectPersistentStore {
|
||||
private final long _lastCommitId = MemoryObjectPersistentStore.this._lastCommitId;
|
||||
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, ByteString> getIterator(IteratorStart start, JObjectKey key) {
|
||||
return new NavigableMapKvIterator<>(_objects, start, key);
|
||||
public IterProdFn<JObjectKey, ByteString> getIterator() {
|
||||
return (start, key) -> new NavigableMapKvIterator<>(_objects, start, key);
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
|
||||
@@ -2,10 +2,13 @@ package com.usatiuk.objects.stores;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
// Persistent storage of objects
|
||||
// All changes are written as sequential transactions
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
package com.usatiuk.objects.stores;
|
||||
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.Tombstone;
|
||||
|
||||
public record PendingDelete(JObjectKey key,
|
||||
long bundleId) implements PendingWriteEntry, Tombstone<JDataVersionedWrapper> {
|
||||
public record PendingDelete(JObjectKey key, long bundleId) implements PendingWriteEntry {
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package com.usatiuk.objects.stores;
|
||||
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.iterators.Data;
|
||||
|
||||
public record PendingWrite(JDataVersionedWrapper value,
|
||||
long bundleId) implements PendingWriteEntry, Data<JDataVersionedWrapper> {
|
||||
public record PendingWrite(JDataVersionedWrapper data, long bundleId) implements PendingWriteEntry {
|
||||
}
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
package com.usatiuk.objects.stores;
|
||||
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.iterators.MaybeTombstone;
|
||||
|
||||
public interface PendingWriteEntry extends MaybeTombstone<JDataVersionedWrapper> {
|
||||
public interface PendingWriteEntry {
|
||||
long bundleId();
|
||||
}
|
||||
|
||||
@@ -4,8 +4,7 @@ import com.google.protobuf.ByteString;
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.ObjectSerializer;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import com.usatiuk.objects.iterators.IterProdFn;
|
||||
import com.usatiuk.objects.iterators.MappingKvIterator;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
@@ -33,8 +32,8 @@ public class SerializingObjectPersistentStore {
|
||||
private final Snapshot<JObjectKey, ByteString> _backing = delegateStore.getSnapshot();
|
||||
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
|
||||
return new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
|
||||
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
|
||||
return (start, key) -> new MappingKvIterator<>(_backing.getIterator(start, key), d -> serializer.deserialize(d));
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
|
||||
@@ -27,15 +27,25 @@ import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@ApplicationScoped
|
||||
public class WritebackObjectPersistentStore {
|
||||
private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
|
||||
private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();
|
||||
|
||||
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
|
||||
long lastFlushedId,
|
||||
long lastCommittedId) {
|
||||
}
|
||||
|
||||
private final AtomicReference<PendingWriteData> _pendingWrites = new AtomicReference<>(null);
|
||||
|
||||
private final Object _flushWaitSynchronizer = new Object();
|
||||
|
||||
private final AtomicLong _lastWrittenId = new AtomicLong(-1);
|
||||
private final AtomicLong _lastCommittedId = new AtomicLong();
|
||||
|
||||
private final AtomicLong _waitedTotal = new AtomicLong(0);
|
||||
@Inject
|
||||
CachingObjectPersistentStore cachedStore;
|
||||
@@ -72,7 +82,6 @@ public class WritebackObjectPersistentStore {
|
||||
lastTxId = s.id();
|
||||
}
|
||||
_lastCommittedId.set(lastTxId);
|
||||
_lastWrittenId.set(lastTxId);
|
||||
_pendingWrites.set(new PendingWriteData(TreePMap.empty(), lastTxId, lastTxId));
|
||||
_ready = true;
|
||||
}
|
||||
@@ -341,10 +350,37 @@ public class WritebackObjectPersistentStore {
|
||||
private final long txId = finalPw.lastCommittedId();
|
||||
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
|
||||
return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("writeback-ps", start, key,
|
||||
(tS, tK) -> new NavigableMapKvIterator<>(_pendingWrites, tS, tK),
|
||||
(tS, tK) -> (CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (CloseableKvIterator<JObjectKey, ?>) _cache.getIterator(tS, tK));
|
||||
public IterProdFn<JObjectKey, JDataVersionedWrapper> getIterator() {
|
||||
IterProdFn<JObjectKey, JDataVersionedWrapper> cacheItProdFn = new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
|
||||
throw new UnsupportedOperationException("Not supported yet.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
|
||||
return Stream.of(new MappingKvIterator<>(
|
||||
new NavigableMapKvIterator<>(_pendingWrites, start, key),
|
||||
e -> switch (e) {
|
||||
case PendingWrite pw -> new Data<>(pw.data());
|
||||
case PendingDelete d -> new Tombstone<>();
|
||||
default -> throw new IllegalStateException("Unexpected value: " + e);
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
return new IterProdFn<JObjectKey, JDataVersionedWrapper>() {
|
||||
@Override
|
||||
public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> get(IteratorStart start, JObjectKey key) {
|
||||
return new TombstoneMergingKvIterator<>("writeback-ps", start, key,
|
||||
cacheItProdFn, _cache.getIterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>> getFlat(IteratorStart start, JObjectKey key) {
|
||||
return Stream.concat(cacheItProdFn.getFlat(start, key), _cache.getIterator().getFlat(start, key));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
@@ -353,7 +389,7 @@ public class WritebackObjectPersistentStore {
|
||||
var cached = _pendingWrites.get(name);
|
||||
if (cached != null) {
|
||||
return switch (cached) {
|
||||
case PendingWrite c -> Optional.of(c.value());
|
||||
case PendingWrite c -> Optional.of(c.data());
|
||||
case PendingDelete d -> {
|
||||
yield Optional.empty();
|
||||
}
|
||||
@@ -384,11 +420,6 @@ public class WritebackObjectPersistentStore {
|
||||
public interface VerboseReadResult {
|
||||
}
|
||||
|
||||
private record PendingWriteData(TreePMap<JObjectKey, PendingWriteEntry> pendingWrites,
|
||||
long lastFlushedId,
|
||||
long lastCommittedId) {
|
||||
}
|
||||
|
||||
private static class TxBundle {
|
||||
private final LinkedHashMap<JObjectKey, BundleEntry> _entries = new LinkedHashMap<>();
|
||||
private final ArrayList<Runnable> _callbacks = new ArrayList<>();
|
||||
|
||||
@@ -4,6 +4,7 @@ import com.usatiuk.objects.JData;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.CloseableKvIterator;
|
||||
import com.usatiuk.objects.iterators.IteratorStart;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.inject.Inject;
|
||||
import jakarta.inject.Singleton;
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
package com.usatiuk.objects.transaction;
|
||||
|
||||
import com.google.common.collect.Streams;
|
||||
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.objects.JData;
|
||||
import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
|
||||
import com.usatiuk.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.objects.snapshot.SnapshotManager;
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.StartupEvent;
|
||||
import jakarta.annotation.Priority;
|
||||
@@ -13,23 +14,34 @@ import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.enterprise.event.Observes;
|
||||
import jakarta.enterprise.inject.Instance;
|
||||
import jakarta.inject.Inject;
|
||||
import jakarta.inject.Singleton;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
// Manages all access to com.usatiuk.objects.JData objects.
|
||||
// In particular, it serves as a source of truth for what is committed to the backing storage.
|
||||
// All data goes through it, it is responsible for transaction atomicity
|
||||
// TODO: persistent tx id
|
||||
@ApplicationScoped
|
||||
public class JObjectManager {
|
||||
private final List<PreCommitTxHook> _preCommitTxHooks;
|
||||
|
||||
private record CommitHookIterationData(PreCommitTxHook hook,
|
||||
Map<JObjectKey, TxRecord.TxObjectRecord<?>> lastWrites,
|
||||
Map<JObjectKey, TxRecord.TxObjectRecord<?>> pendingWrites) {
|
||||
}
|
||||
|
||||
@Inject
|
||||
WritebackObjectPersistentStore writebackObjectPersistentStore;
|
||||
SnapshotManager snapshotManager;
|
||||
@Inject
|
||||
TransactionFactory transactionFactory;
|
||||
@Inject
|
||||
LockManager lockManager;
|
||||
private boolean _ready = false;
|
||||
|
||||
JObjectManager(Instance<PreCommitTxHook> preCommitTxHooks) {
|
||||
_preCommitTxHooks = List.copyOf(preCommitTxHooks.stream().sorted(Comparator.comparingInt(PreCommitTxHook::getPriority)).toList());
|
||||
Log.debugv("Pre-commit hooks: {0}", String.join("->", _preCommitTxHooks.stream().map(Objects::toString).toList()));
|
||||
@@ -168,36 +180,25 @@ public class JObjectManager {
|
||||
toUnlock.add(lock);
|
||||
}
|
||||
|
||||
commitSnapshot = writebackObjectPersistentStore.getSnapshot();
|
||||
commitSnapshot = snapshotManager.createSnapshot();
|
||||
} else {
|
||||
Log.trace("Committing transaction - no changes");
|
||||
|
||||
long version = 0L;
|
||||
|
||||
for (var read : readSet.values()) {
|
||||
version = Math.max(version, read.data().map(JDataVersionedWrapper::version).orElse(0L));
|
||||
if (read instanceof TransactionObjectLocked<?> locked) {
|
||||
locked.lock().close();
|
||||
}
|
||||
}
|
||||
|
||||
long finalVersion = version;
|
||||
Consumer<Runnable> fenceFn = r -> {
|
||||
writebackObjectPersistentStore.asyncFence(finalVersion, r);
|
||||
};
|
||||
|
||||
return Pair.of(
|
||||
Stream.concat(
|
||||
tx.getOnCommit().stream(),
|
||||
Stream.<Runnable>of(() -> {
|
||||
for (var f : tx.getOnFlush())
|
||||
fenceFn.accept(f);
|
||||
})
|
||||
tx.getOnFlush().stream()
|
||||
).toList(),
|
||||
new TransactionHandle() {
|
||||
@Override
|
||||
public void onFlush(Runnable runnable) {
|
||||
fenceFn.accept(runnable);
|
||||
runnable.run();
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -218,7 +219,7 @@ public class JObjectManager {
|
||||
// TODO: Every write gets a dependency due to hooks
|
||||
continue;
|
||||
// assert false;
|
||||
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().value().isEmpty());
|
||||
// throw new TxCommitException("Serialization hazard: " + dep.isEmpty() + " vs " + read.getValue().data().isEmpty());
|
||||
}
|
||||
|
||||
if (current.get().version() > snapshotId) {
|
||||
@@ -232,9 +233,8 @@ public class JObjectManager {
|
||||
Log.tracev("Skipped dependency checks: no changes");
|
||||
}
|
||||
|
||||
var addFlushCallback = writebackObjectPersistentStore.commitTx(writes.values());
|
||||
var addFlushCallback = snapshotManager.commitTx(writes.values());
|
||||
|
||||
// TODO: is it ok to possibly run it inside transaction?
|
||||
for (var callback : tx.getOnFlush()) {
|
||||
addFlushCallback.accept(callback);
|
||||
}
|
||||
@@ -271,8 +271,30 @@ public class JObjectManager {
|
||||
tx.close();
|
||||
}
|
||||
|
||||
private record CommitHookIterationData(PreCommitTxHook hook,
|
||||
Map<JObjectKey, TxRecord.TxObjectRecord<?>> lastWrites,
|
||||
Map<JObjectKey, TxRecord.TxObjectRecord<?>> pendingWrites) {
|
||||
}
|
||||
// private class TransactionObjectSourceImpl implements TransactionObjectSource {
|
||||
// private final long _txId;
|
||||
//
|
||||
// private TransactionObjectSourceImpl(long txId) {
|
||||
// _txId = txId;
|
||||
// }
|
||||
//
|
||||
// @Override
|
||||
// public <T extends JData> TransactionObject<T> get(Class<T> type, JObjectKey key) {
|
||||
// var got = getObj(type, key);
|
||||
// if (got.data().isPresent() && got.data().get().version() > _txId) {
|
||||
// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
|
||||
// }
|
||||
// return got;
|
||||
// }
|
||||
//
|
||||
// @Override
|
||||
// public <T extends JData> TransactionObject<T> getWriteLocked(Class<T> type, JObjectKey key) {
|
||||
// var got = getObjLock(type, key);
|
||||
// if (got.data().isPresent() && got.data().get().version() > _txId) {
|
||||
// got.lock().close();
|
||||
// throw new TxCommitException("Serialization race for " + key + ": " + got.data().get().version() + " vs " + _txId);
|
||||
// }
|
||||
// return got;
|
||||
// }
|
||||
// }
|
||||
}
|
||||
@@ -1,10 +1,11 @@
|
||||
package com.usatiuk.objects.transaction;
|
||||
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.utils.DataLocker;
|
||||
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;
|
||||
import com.usatiuk.dhfs.utils.DataLocker;
|
||||
import jakarta.annotation.Nonnull;
|
||||
import jakarta.annotation.Nullable;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.inject.Singleton;
|
||||
|
||||
@Singleton
|
||||
|
||||
@@ -5,7 +5,7 @@ import com.usatiuk.objects.JDataVersionedWrapper;
|
||||
import com.usatiuk.objects.JObjectKey;
|
||||
import com.usatiuk.objects.iterators.*;
|
||||
import com.usatiuk.objects.snapshot.Snapshot;
|
||||
import com.usatiuk.objects.stores.WritebackObjectPersistentStore;
|
||||
import com.usatiuk.objects.snapshot.SnapshotManager;
|
||||
import io.quarkus.logging.Log;
|
||||
import jakarta.inject.Inject;
|
||||
import jakarta.inject.Singleton;
|
||||
@@ -13,11 +13,12 @@ import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@Singleton
|
||||
public class TransactionFactoryImpl implements TransactionFactory {
|
||||
@Inject
|
||||
WritebackObjectPersistentStore writebackObjectPersistentStore;
|
||||
SnapshotManager snapshotManager;
|
||||
@Inject
|
||||
LockManager lockManager;
|
||||
@ConfigProperty(name = "dhfs.objects.transaction.never-lock")
|
||||
@@ -64,7 +65,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
|
||||
private Map<JObjectKey, TxRecord.TxObjectRecord<?>> _newWrites = new HashMap<>();
|
||||
|
||||
private TransactionImpl() {
|
||||
_snapshot = writebackObjectPersistentStore.getSnapshot();
|
||||
_snapshot = snapshotManager.createSnapshot();
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -94,12 +95,15 @@ public class TransactionFactoryImpl implements TransactionFactory {
|
||||
|
||||
@Override
|
||||
public <T extends JData> Optional<T> getFromSource(Class<T> type, JObjectKey key) {
|
||||
return _readSet.computeIfAbsent(key, k -> {
|
||||
var read = _snapshot.readObject(k);
|
||||
return new TransactionObjectNoLock<>(read);
|
||||
})
|
||||
.data()
|
||||
.map(w -> type.cast(w.data()));
|
||||
var got = _readSet.get(key);
|
||||
|
||||
if (got == null) {
|
||||
var read = _snapshot.readObject(key);
|
||||
_readSet.put(key, new TransactionObjectNoLock<>(read));
|
||||
return read.map(JDataVersionedWrapper::data).map(type::cast);
|
||||
}
|
||||
|
||||
return got.data().map(JDataVersionedWrapper::data).map(type::cast);
|
||||
}
|
||||
|
||||
public <T extends JData> Optional<T> getWriteLockedFromSource(Class<T> type, JObjectKey key) {
|
||||
@@ -157,26 +161,52 @@ public class TransactionFactoryImpl implements TransactionFactory {

@Override
public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {

Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
return new ReadTrackingIterator(TombstoneMergingKvIterator.<JObjectKey, ReadTrackingInternalCrap>of("tx", start, key,
(tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new DataWrapper<>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> new TombstoneImpl<>();
case null, default -> null;
}),
(tS, tK) -> new MappingKvIterator<>(_snapshot.getIterator(tS, tK),
d -> new DataWrapper<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapSource(d)))));
return new ReadTrackingIterator(new TombstoneMergingKvIterator<>("tx", start, key,
new IterProdFn<JObjectKey, ReadTrackingInternalCrap>() {
@Override
public CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> get(IteratorStart start, JObjectKey key) {
throw new UnsupportedOperationException("Not implemented");
}

@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<ReadTrackingInternalCrap>>> getFlat(IteratorStart start, JObjectKey key) {
return Stream.of(new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, start, key),
t -> switch (t) {
case TxRecord.TxObjectRecordWrite<?> write ->
new Data<>(new ReadTrackingInternalCrapTx(write.data()));
case TxRecord.TxObjectRecordDeleted deleted -> new Tombstone<>();
case null, default -> null;
}));
}
},
new IterProdFn<JObjectKey, ReadTrackingInternalCrap>() {
@Override
public CloseableKvIterator<JObjectKey, ReadTrackingInternalCrap> get(IteratorStart start, JObjectKey key) {
throw new UnsupportedOperationException("Not implemented");
}

@Override
public Stream<CloseableKvIterator<JObjectKey, MaybeTombstone<ReadTrackingInternalCrap>>> getFlat(IteratorStart start, JObjectKey key) {
return _snapshot.getIterator().getFlat(start, key).<CloseableKvIterator<JObjectKey, MaybeTombstone<ReadTrackingInternalCrap>>>map(
i -> new MappingKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>, MaybeTombstone<ReadTrackingInternalCrap>>(i,
d ->
switch (d) {
case Data<JDataVersionedWrapper> data ->
new Data<ReadTrackingInternalCrap>(new ReadTrackingInternalCrapSource(data.value()));
case Tombstone<JDataVersionedWrapper> tombstone ->
new Tombstone<ReadTrackingInternalCrap>();
default ->
throw new IllegalStateException("Unexpected value: " + d);
})
);
}
}));
}

@Override
public void put(JData obj) {
var read = _readSet.get(obj.key());
if (read != null && (read.data().map(JDataVersionedWrapper::data).orElse(null) == obj)) {
return;
}

_writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
_newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
}
@@ -1,6 +1,6 @@
package com.usatiuk.objects.transaction;

import com.usatiuk.utils.VoidFn;
import com.usatiuk.dhfs.utils.VoidFn;
import io.quarkus.logging.Log;

import java.util.function.Supplier;
@@ -17,27 +17,30 @@ public interface TransactionManager {
return supplier.get();
}

while (true) {
begin();
boolean commit = false;
try {
var ret = supplier.get();
commit = true;
commit();
return ret;
} catch (TxCommitException txCommitException) {
if (!commit)
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
tries--;
} catch (Throwable e) {
if (!commit)
rollback();
throw e;
begin();
T ret;
try {
ret = supplier.get();
} catch (TxCommitException txCommitException) {
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(supplier, tries - 1);
} catch (Throwable e) {
rollback();
throw e;
}
try {
commit();
return ret;
} catch (TxCommitException txCommitException) {
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(supplier, tries - 1);
}
}
@@ -52,29 +55,29 @@ public interface TransactionManager {
};
}

while (true) {
begin();
boolean commit = false;
try {
fn.apply();
commit = true;
var ret = commit();
return ret;
} catch (TxCommitException txCommitException) {
if (!commit)
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
tries--;
} catch (Throwable e) {
if (!commit)
rollback();
throw e;
begin();
try {
fn.apply();
} catch (TxCommitException txCommitException) {
rollback();
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(fn, tries - 1);
} catch (Throwable e) {
rollback();
throw e;
}
try {
return commit();
} catch (TxCommitException txCommitException) {
if (tries == 0) {
Log.error("Transaction commit failed", txCommitException);
throw txCommitException;
}
return runTries(fn, tries - 1);
}

}

default TransactionHandle run(VoidFn fn) {
@@ -1,6 +1,7 @@
package com.usatiuk.objects.transaction;

import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import org.apache.commons.lang3.tuple.Pair;

@@ -2,7 +2,7 @@ package com.usatiuk.objects.transaction;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;

import java.util.Optional;


@@ -4,7 +4,7 @@ import com.usatiuk.objects.JData;
import com.usatiuk.objects.JDataVersionedWrapper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.snapshot.Snapshot;
import com.usatiuk.utils.AutoCloseableNoThrow;
import com.usatiuk.dhfs.utils.AutoCloseableNoThrow;

import java.util.Collection;
import java.util.Map;

@@ -7,8 +7,11 @@ import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

import java.util.List;
import java.util.Map;
import java.util.stream.Stream;

class ObjectsIterateAllTestProfiles {
@@ -1,264 +1,262 @@
package com.usatiuk.objects.iterators;

import net.jqwik.api.*;
import net.jqwik.api.state.Action;
import net.jqwik.api.state.ActionChain;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.jupiter.api.Assertions;

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class MergingKvIteratorPbtTest {
@Property
public void checkMergingIterator(@ForAll("actions") ActionChain<MergingIteratorModel> actions) {
actions.run();
}

@Provide
Arbitrary<ActionChain<MergingIteratorModel>> actions(@ForAll("lists") List<List<Map.Entry<Integer, Integer>>> list,
@ForAll IteratorStart iteratorStart, @ForAll("startKey") Integer startKey) {
return ActionChain.startWith(() -> new MergingIteratorModel(list, iteratorStart, startKey))
.withAction(new NextAction())
.withAction(new PeekNextKeyAction())
.withAction(new SkipAction())
.withAction(new PeekPrevKeyAction())
.withAction(new SkipPrevAction())
.withAction(new PrevAction())
.withAction(new HasNextAction())
.withAction(new HasPrevAction());
}

@Provide
Arbitrary<List<List<Map.Entry<Integer, Integer>>>> lists() {
return Arbitraries.entries(Arbitraries.integers().between(-50, 50), Arbitraries.integers().between(-50, 50))
.list().uniqueElements(Map.Entry::getKey).ofMinSize(0).ofMaxSize(20)
.list().ofMinSize(1).ofMaxSize(5);
}

@Provide
Arbitrary<Integer> startKey() {
return Arbitraries.integers().between(-51, 51);
}

static class MergingIteratorModel implements CloseableKvIterator<Integer, Integer> {
private final CloseableKvIterator<Integer, Integer> mergedIterator;
private final CloseableKvIterator<Integer, Integer> mergingIterator;

private MergingIteratorModel(List<List<Map.Entry<Integer, Integer>>> pairs, IteratorStart startType, Integer startKey) {
TreeMap<Integer, Integer> perfectMerged = new TreeMap<>();
for (List<Map.Entry<Integer, Integer>> list : pairs) {
for (Map.Entry<Integer, Integer> pair : list) {
perfectMerged.putIfAbsent(pair.getKey(), pair.getValue());
}
}
mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
mergingIterator = new MergingKvIterator<>("test", startType, startKey, pairs.stream().<IterProdFn<Integer, Integer>>map(
list -> (IteratorStart start, Integer key) -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), start, key)
).toList());
}

@Override
public Integer peekNextKey() {
var mergedKey = mergedIterator.peekNextKey();
var mergingKey = mergingIterator.peekNextKey();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}

@Override
public void skip() {
mergedIterator.skip();
mergingIterator.skip();
}

@Override
public Integer peekPrevKey() {
var mergedKey = mergedIterator.peekPrevKey();
var mergingKey = mergingIterator.peekPrevKey();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}

@Override
public Pair<Integer, Integer> prev() {
var mergedKey = mergedIterator.prev();
var mergingKey = mergingIterator.prev();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}

@Override
public boolean hasPrev() {
var mergedKey = mergedIterator.hasPrev();
var mergingKey = mergingIterator.hasPrev();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}

@Override
public void skipPrev() {
mergedIterator.skipPrev();
mergingIterator.skipPrev();
}

@Override
public void close() {
mergedIterator.close();
mergingIterator.close();
}

@Override
public boolean hasNext() {
var mergedKey = mergedIterator.hasNext();
var mergingKey = mergingIterator.hasNext();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}

@Override
public Pair<Integer, Integer> next() {
var mergedKey = mergedIterator.next();
var mergingKey = mergingIterator.next();
Assertions.assertEquals(mergedKey, mergingKey);
return mergedKey;
}
}

static class PeekNextKeyAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.peekNextKey();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}

@Override
public String description() {
return "Peek next key";
}
}

static class SkipAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.skip();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}

@Override
public String description() {
return "Skip next key";
}
}

static class PeekPrevKeyAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.peekPrevKey();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}

@Override
public String description() {
return "Peek prev key";
}
}

static class SkipPrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.skipPrev();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}

@Override
public String description() {
return "Skip prev key";
}
}

static class PrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.prev();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasPrev();
}

@Override
public String description() {
return "Prev key";
}
}

static class NextAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.next();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return state.hasNext();
}

@Override
public String description() {
return "Next key";
}
}

static class HasNextAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.hasNext();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return true;
}

@Override
public String description() {
return "Has next key";
}
}

static class HasPrevAction extends Action.JustMutate<MergingIteratorModel> {
@Override
public void mutate(MergingIteratorModel state) {
state.hasPrev();
}

@Override
public boolean precondition(MergingIteratorModel state) {
return true;
}

@Override
public String description() {
return "Has prev key";
}
}
}
//package com.usatiuk.objects.iterators;
//
//import net.jqwik.api.*;
//import net.jqwik.api.state.Action;
//import net.jqwik.api.state.ActionChain;
//import org.apache.commons.lang3.tuple.Pair;
//import org.junit.jupiter.api.Assertions;
//
//import java.util.*;
//
//public class MergingKvIteratorPbtTest {
// static class MergingIteratorModel implements CloseableKvIterator<Integer, Integer> {
// private final CloseableKvIterator<Integer, Integer> mergedIterator;
// private final CloseableKvIterator<Integer, Integer> mergingIterator;
//
// private MergingIteratorModel(List<List<Map.Entry<Integer, Integer>>> pairs, IteratorStart startType, Integer startKey) {
// TreeMap<Integer, Integer> perfectMerged = new TreeMap<>();
// for (List<Map.Entry<Integer, Integer>> list : pairs) {
// for (Map.Entry<Integer, Integer> pair : list) {
// perfectMerged.putIfAbsent(pair.getKey(), pair.getValue());
// }
// }
// mergedIterator = new NavigableMapKvIterator<>(perfectMerged, startType, startKey);
// mergingIterator = new MergingKvIterator<>("test", startType, startKey, pairs.stream().<IterProdFn<Integer, Integer>>map(
// list -> (IteratorStart start, Integer key) -> new NavigableMapKvIterator<>(new TreeMap<Integer, Integer>(Map.ofEntries(list.toArray(Map.Entry[]::new))), start, key)
// ).toList());
// }
//
// @Override
// public Integer peekNextKey() {
// var mergedKey = mergedIterator.peekNextKey();
// var mergingKey = mergingIterator.peekNextKey();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
//
// @Override
// public void skip() {
// mergedIterator.skip();
// mergingIterator.skip();
// }
//
// @Override
// public Integer peekPrevKey() {
// var mergedKey = mergedIterator.peekPrevKey();
// var mergingKey = mergingIterator.peekPrevKey();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
//
// @Override
// public Pair<Integer, Integer> prev() {
// var mergedKey = mergedIterator.prev();
// var mergingKey = mergingIterator.prev();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
//
// @Override
// public boolean hasPrev() {
// var mergedKey = mergedIterator.hasPrev();
// var mergingKey = mergingIterator.hasPrev();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
//
// @Override
// public void skipPrev() {
// mergedIterator.skipPrev();
// mergingIterator.skipPrev();
// }
//
// @Override
// public void close() {
// mergedIterator.close();
// mergingIterator.close();
// }
//
// @Override
// public boolean hasNext() {
// var mergedKey = mergedIterator.hasNext();
// var mergingKey = mergingIterator.hasNext();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
//
// @Override
// public Pair<Integer, Integer> next() {
// var mergedKey = mergedIterator.next();
// var mergingKey = mergingIterator.next();
// Assertions.assertEquals(mergedKey, mergingKey);
// return mergedKey;
// }
// }
//
// static class PeekNextKeyAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.peekNextKey();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasNext();
// }
//
// @Override
// public String description() {
// return "Peek next key";
// }
// }
//
// static class SkipAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.skip();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasNext();
// }
//
// @Override
// public String description() {
// return "Skip next key";
// }
// }
//
// static class PeekPrevKeyAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.peekPrevKey();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasPrev();
// }
//
// @Override
// public String description() {
// return "Peek prev key";
// }
// }
//
// static class SkipPrevAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.skipPrev();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasPrev();
// }
//
// @Override
// public String description() {
// return "Skip prev key";
// }
// }
//
// static class PrevAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.prev();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasPrev();
// }
//
// @Override
// public String description() {
// return "Prev key";
// }
// }
//
// static class NextAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.next();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return state.hasNext();
// }
//
// @Override
// public String description() {
// return "Next key";
// }
// }
//
// static class HasNextAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.hasNext();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return true;
// }
//
// @Override
// public String description() {
// return "Has next key";
// }
// }
//
// static class HasPrevAction extends Action.JustMutate<MergingIteratorModel> {
// @Override
// public void mutate(MergingIteratorModel state) {
// state.hasPrev();
// }
//
// @Override
// public boolean precondition(MergingIteratorModel state) {
// return true;
// }
//
// @Override
// public String description() {
// return "Has prev key";
// }
// }
//
// @Property
// public void checkMergingIterator(@ForAll("actions") ActionChain<MergingIteratorModel> actions) {
// actions.run();
// }
//
// @Provide
// Arbitrary<ActionChain<MergingIteratorModel>> actions(@ForAll("lists") List<List<Map.Entry<Integer, Integer>>> list,
// @ForAll IteratorStart iteratorStart, @ForAll("startKey") Integer startKey) {
// return ActionChain.startWith(() -> new MergingIteratorModel(list, iteratorStart, startKey))
// .withAction(new NextAction())
// .withAction(new PeekNextKeyAction())
// .withAction(new SkipAction())
// .withAction(new PeekPrevKeyAction())
// .withAction(new SkipPrevAction())
// .withAction(new PrevAction())
// .withAction(new HasNextAction())
// .withAction(new HasPrevAction());
// }
//
// @Provide
// Arbitrary<List<List<Map.Entry<Integer, Integer>>>> lists() {
// return Arbitraries.entries(Arbitraries.integers().between(-50, 50), Arbitraries.integers().between(-50, 50))
// .list().uniqueElements(Map.Entry::getKey).ofMinSize(0).ofMaxSize(20)
// .list().ofMinSize(1).ofMaxSize(5);
// }
//
// @Provide
// Arbitrary<Integer> startKey() {
// return Arbitraries.integers().between(-51, 51);
// }
//}
@@ -35,6 +35,13 @@
<dhfs.native-libs-dir>${project.parent.build.outputDirectory}/native</dhfs.native-libs-dir>
</properties>

<repositories>
<repository>
<id>jitpack.io</id>
<url>https://jitpack.io</url>
</repository>
</repositories>

<dependencyManagement>
<dependencies>
<dependency>
@@ -144,7 +151,6 @@
</native.image.path>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
<buildDirectory>${project.build.directory}</buildDirectory>
<lazyFsPath>${project.basedir}/../../thirdparty/lazyfs/lazyfs/lazyfs</lazyFsPath>
<nativeLibsDirectory>${dhfs.native-libs-dir}</nativeLibsDirectory>
<maven.home>${maven.home}</maven.home>
</systemPropertyVariables>
@@ -178,8 +184,8 @@
--initialize-at-run-time=jnr.ffi.util.ref.FinalizableReferenceQueue,
--initialize-at-run-time=jnr.ffi.provider.jffi.NativeFinalizer,
--initialize-at-run-time=jnr.ffi.provider.jffi.NativeFinalizer$SingletonHolder,
--initialize-at-run-time=com.usatiuk.utils.RefcountedCloseable,
--initialize-at-run-time=com.usatiuk.utils.DataLocker$Lock,
--initialize-at-run-time=com.usatiuk.dhfs.utils.RefcountedCloseable,
--initialize-at-run-time=com.usatiuk.dhfs.utils.DataLocker$Lock,
--initialize-at-run-time=com.usatiuk.objects.stores.LmdbObjectPersistentStore$LmdbKvIterator,
--initialize-at-run-time=com.usatiuk.objects.stores.LmdbObjectPersistentStore,
--initialize-at-run-time=com.google.protobuf.UnsafeUtil
@@ -13,10 +13,6 @@
</parent>

<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-smallrye-openapi</artifactId>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
@@ -76,6 +72,26 @@
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@@ -1,6 +1,4 @@
package com.usatiuk.dhfs.remoteobj;

import com.usatiuk.dhfs.peersync.PeerId;
package com.usatiuk.dhfs;

public interface ConflictResolver {
void resolve(PeerId fromPeer, RemoteObjectMeta ours, RemoteObjectMeta theirs);
@@ -1,12 +1,11 @@
package com.usatiuk.dhfs.refcount;
package com.usatiuk.dhfs;

import com.usatiuk.dhfs.remoteobj.RemoteObjectDeleter;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.PreCommitTxHook;
import com.usatiuk.objects.transaction.Transaction;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

@@ -1,7 +1,8 @@
package com.usatiuk.dhfs.refcount;
package com.usatiuk.dhfs;

import com.usatiuk.dhfs.jmap.JMapRef;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyImpl;

public record JDataNormalRef(JObjectKey obj) implements JDataRef {
@Override
@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.refcount;
package com.usatiuk.dhfs;

import com.usatiuk.objects.JObjectKey;

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.refcount;
package com.usatiuk.dhfs;

import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
@@ -1,6 +1,7 @@
package com.usatiuk.dhfs.remoteobj;
package com.usatiuk.dhfs;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;

import java.io.Serializable;
import java.util.Collection;
@@ -1,7 +1,7 @@
package com.usatiuk.dhfs;

import com.usatiuk.dhfs.persistence.JObjectKeyP;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import jakarta.inject.Singleton;

@Singleton

@@ -1,6 +1,7 @@
package com.usatiuk.dhfs.peersync;
package com.usatiuk.dhfs;

import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.JObjectKeyImpl;

import java.io.Serializable;

@@ -1,6 +1,6 @@
package com.usatiuk.dhfs.remoteobj;
package com.usatiuk.dhfs;

import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import org.pcollections.PMap;

public record ReceivedObject(PMap<PeerId, Long> changelog, JDataRemoteDto data) {
Some files were not shown because too many files have changed in this diff