Mirror of https://github.com/usatiuk/dhfs.git, synced 2025-10-29 04:57:48 +01:00
Compare commits
5 Commits
4f5f347b3c ... 34db870fc6
| SHA1 |
|---|
| 34db870fc6 |
| 0e62a29ce0 |
| 7de5f91fd2 |
| ac68208b1a |
| 4e0675940e |
.github/workflows/server.yml (vendored): 26 lines changed
@@ -20,26 +20,21 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
 
       - name: Install sudo for ACT
         run: apt-get update && apt-get install -y sudo
         if: env.ACT=='true'
 
-      - name: Install fuse and maven
-        run: sudo apt-get update && sudo apt-get install -y libfuse2
+      - name: Install FUSE
+        run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
 
-      - name: Download maven
-        run: |
-          cd "$HOME"
-          mkdir maven-bin
-          curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
-          echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
+      - name: User allow other for fuse
+        run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
 
-      - name: Maven info
-        run: |
-          echo $GITHUB_PATH
-          echo $PATH
-          mvn -v
+      - name: Dump fuse.conf
+        run: cat /etc/fuse.conf
 
       - name: Set up JDK 21
         uses: actions/setup-java@v4
@@ -48,6 +43,9 @@ jobs:
           distribution: "zulu"
           cache: maven
 
+      - name: Build LazyFS
+        run: cd thirdparty/lazyfs/ && ./build.sh
+
       - name: Test with Maven
         run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
 
.gitmodules (vendored, new file): 3 lines added
@@ -0,0 +1,3 @@
+[submodule "thirdparty/lazyfs/lazyfs"]
+    path = thirdparty/lazyfs/lazyfs
+    url = git@github.com:dsrhaslab/lazyfs.git
DhfsFusex3IT.java

@@ -35,13 +35,15 @@ public class DhfsFusex3IT {
     String c2uuid;
     String c3uuid;
 
+    Network network;
+
     // This calculation is somewhat racy, so keep it hardcoded for now
     long emptyFileCount = 9;
 
     @BeforeEach
     void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
         // TODO: Dedup
-        Network network = Network.newNetwork();
+        network = Network.newNetwork();
 
         container1 = new GenericContainer<>(DhfsImage.getInstance())
                 .withPrivilegedMode(true)
@@ -252,21 +254,22 @@ public class DhfsFusex3IT {
         await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
 
         var client = DockerClientFactory.instance().client();
-        client.pauseContainerCmd(container1.getContainerId()).exec();
-        client.pauseContainerCmd(container2.getContainerId()).exec();
-        // Pauses needed as otherwise docker buffers some incoming packets
+        client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
+        client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
+        client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
 
         waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
-        client.pauseContainerCmd(container3.getContainerId()).exec();
-        client.unpauseContainerCmd(container2.getContainerId()).exec();
         waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
-        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
-        client.pauseContainerCmd(container2.getContainerId()).exec();
-        client.unpauseContainerCmd(container1.getContainerId()).exec();
         waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
 
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
+        await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
         await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
-        client.unpauseContainerCmd(container2.getContainerId()).exec();
-        client.unpauseContainerCmd(container3.getContainerId()).exec();
+        client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
+        client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
+        client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
 
         Log.warn("Waiting for connections");
         waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
         waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
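The hunk above swaps container pauses for Docker network disconnects and reconnects to partition the three peers. As a rough, self-contained sketch of the same docker-java calls outside this test (the alpine image and sleep command are placeholders, not part of DHFS; the real tests use DhfsImage.getInstance()):

```java
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.utility.DockerImageName;

public class NetworkPartitionSketch {
    public static void main(String[] args) {
        try (Network network = Network.newNetwork();
             // Placeholder container, only here so the sketch runs on its own.
             GenericContainer<?> peer = new GenericContainer<>(DockerImageName.parse("alpine:3.20"))
                     .withCommand("sleep", "infinity")
                     .withNetwork(network)) {
            peer.start();

            var client = DockerClientFactory.instance().client();

            // Detach the container from the shared network: the "partition".
            client.disconnectFromNetworkCmd()
                    .withContainerId(peer.getContainerId())
                    .withNetworkId(network.getId())
                    .exec();

            // Later reattach it to heal the partition, as the test above does.
            client.connectToNetworkCmd()
                    .withContainerId(peer.getContainerId())
                    .withNetworkId(network.getId())
                    .exec();
        }
    }
}
```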
LazyFs.java (new file)

@@ -0,0 +1,177 @@
+package com.usatiuk.dhfs.integration;
+
+import io.quarkus.logging.Log;
+
+import java.io.*;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+
+public class LazyFs {
+    private static final String lazyFsPath;
+
+    private final String dataRoot;
+    private final String lazyFsDataPath;
+    private final String name;
+    private final File tmpConfigFile;
+    private final File fifoPath;
+
+    static {
+        lazyFsPath = System.getProperty("lazyFsPath");
+        System.out.println("LazyFs Path: " + lazyFsPath);
+    }
+
+    public LazyFs(String name, String dataRoot, String lazyFsDataPath) {
+        this.name = name;
+        this.dataRoot = dataRoot;
+        this.lazyFsDataPath = lazyFsDataPath;
+
+        try {
+            tmpConfigFile = File.createTempFile("lazyfs", ".conf");
+            tmpConfigFile.deleteOnExit();
+
+            fifoPath = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
+            fifoPath.deleteOnExit();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private Thread outputPiper;
+    private Process fs;
+
+    private String fifoPath() {
+        return fifoPath.getAbsolutePath();
+    }
+
+    public void start() {
+        var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
+        if (!lfsPath.toFile().isFile())
+            throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
+        if (!lfsPath.toFile().canExecute())
+            throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
+
+        try (var rwFile = new RandomAccessFile(tmpConfigFile, "rw");
+             var channel = rwFile.getChannel()) {
+            channel.truncate(0);
+            var config = "[faults]\n" +
+                    "fifo_path=\"" + fifoPath() + "\"\n" +
+                    "[cache]\n" +
+                    "apply_eviction=false\n" +
+                    "[cache.simple]\n" +
+                    "custom_size=\"1gb\"\n" +
+                    "blocks_per_page=1\n" +
+                    "[filesystem]\n" +
+                    "log_all_operations=false\n" +
+                    "logfile=\"\"";
+            rwFile.write(config.getBytes());
+            Log.info("LazyFs config: \n" + config);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        var argList = new ArrayList<String>();
+
+        argList.add(lfsPath.toString());
+        argList.add(Path.of(dataRoot).toString());
+        argList.add("--config-path");
+        argList.add(tmpConfigFile.getAbsolutePath());
+        argList.add("-o");
+        argList.add("allow_other");
+        argList.add("-o");
+        argList.add("modules=subdir");
+        argList.add("-o");
+        argList.add("subdir=" + Path.of(lazyFsDataPath).toAbsolutePath().toString());
+        try {
+            Log.info("Starting LazyFs " + argList);
+            fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
+            Thread.sleep(1000);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        outputPiper = new Thread(() -> {
+            try {
+                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
+                    String line;
+
+                    while ((line = input.readLine()) != null) {
+                        System.out.println(line);
+                    }
+                }
+            } catch (Exception e) {
+                Log.info("Exception in LazyFs piper", e);
+            }
+        });
+        outputPiper.start();
+        outputPiper = new Thread(() -> {
+            try {
+                try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
+                    String line;
+
+                    while ((line = input.readLine()) != null) {
+                        System.out.println(line);
+                    }
+                }
+            } catch (Exception e) {
+                Log.info("Exception in LazyFs piper", e);
+            }
+        });
+        outputPiper.start();
+    }
+
+    public void crash() {
+        try {
+            var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
+            System.out.println("Running command: " + cmd);
+            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
+            stop();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void stop() {
+        try {
+            if (fs == null) {
+                return;
+            }
+            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
+            if (!fs.waitFor(1, TimeUnit.SECONDS))
+                throw new RuntimeException("LazyFs process did not stop in time");
+            fs = null;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    // Doesn't actually work?
+//
+//    public void crashop() {
+//        try {
+//            var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
+//            System.out.println("Running command: " + cmd);
+//            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
+//            Thread.sleep(1000);
+//            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
+//            Thread.sleep(1000);
+//        } catch (Exception e) {
+//            throw new RuntimeException(e);
+//        }
+//    }
+//
+//    public void crashseq() {
+//        try {
+//            var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
+//            System.out.println("Running command: " + cmd);
+//            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
+//            Thread.sleep(1000);
+//            Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
+//            Thread.sleep(1000);
+//        } catch (Exception e) {
+//            throw new RuntimeException(e);
+//        }
+//    }
+}
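A rough usage sketch of this helper, assuming the lazyFsPath system property points at a built LazyFS checkout; the paths and the "sketch" name below are placeholders, and the real wiring lives in LazyFsIT further down:

```java
package com.usatiuk.dhfs.integration; // same package as LazyFs, for illustration only

import java.nio.file.Files;

public class LazyFsUsageSketch {
    public static void main(String[] args) throws Exception {
        // In the real build this property comes from the surefire configuration in pom.xml.
        System.setProperty("lazyFsPath", "/path/to/thirdparty/lazyfs/lazyfs/lazyfs"); // placeholder

        var mountDir = Files.createTempDirectory("dhfsdata").toFile();     // what the app/container sees
        var backingDir = Files.createTempDirectory("lazyfsroot").toFile(); // where LazyFS keeps the data

        var lazyFs = new LazyFs("sketch", mountDir.toString(), backingDir.toString());
        lazyFs.start();      // writes a LazyFS config, mounts it over mountDir backed by backingDir
        try {
            // ... exercise the system under test against mountDir ...
            lazyFs.crash();  // send a crash fault through the faults FIFO, then unmount
        } finally {
            lazyFs.stop();   // safe to call again: returns early once the process is gone
        }
    }
}
```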
LazyFsIT.java (new file)

@@ -0,0 +1,199 @@
+package com.usatiuk.dhfs.integration;
+
+import com.github.dockerjava.api.model.Device;
+import com.usatiuk.dhfs.TestDataCleaner;
+import io.quarkus.logging.Log;
+import org.junit.jupiter.api.*;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.DockerClientFactory;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.containers.output.Slf4jLogConsumer;
+import org.testcontainers.containers.output.WaitingConsumer;
+import org.testcontainers.containers.wait.strategy.Wait;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.time.Duration;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
+
+import static org.awaitility.Awaitility.await;
+
+public class LazyFsIT {
+    GenericContainer<?> container1;
+    GenericContainer<?> container2;
+
+    WaitingConsumer waitingConsumer1;
+    WaitingConsumer waitingConsumer2;
+
+    String c1uuid;
+    String c2uuid;
+
+    File data1;
+    File data2;
+    File data1Lazy;
+
+    LazyFs lazyFs1;
+
+    @BeforeEach
+    void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
+        data1 = Files.createTempDirectory("dhfsdata").toFile();
+        data2 = Files.createTempDirectory("dhfsdata").toFile();
+        data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
+
+        Network network = Network.newNetwork();
+
+        lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
+        lazyFs1.start();
+
+        container1 = new GenericContainer<>(DhfsImage.getInstance())
+                .withPrivilegedMode(true)
+                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
+                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
+                .withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
+        container2 = new GenericContainer<>(DhfsImage.getInstance())
+                .withPrivilegedMode(true)
+                .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
+                .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
+                .withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
+
+        Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
+
+        waitingConsumer1 = new WaitingConsumer();
+        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+        waitingConsumer2 = new WaitingConsumer();
+        var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
+        container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
+
+        c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+        c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
+
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
+        Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
+
+        var c1curl = container1.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        " --request PUT " +
+                        " --data '{\"uuid\":\"" + c2uuid + "\"}' " +
+                        " http://localhost:8080/peers-manage/known-peers");
+
+        var c2curl = container2.execInContainer("/bin/sh", "-c",
+                "curl --header \"Content-Type: application/json\" " +
+                        " --request PUT " +
+                        " --data '{\"uuid\":\"" + c1uuid + "\"}' " +
+                        " http://localhost:8080/peers-manage/known-peers");
+
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+    }
+
+    @AfterEach
+    void stop() {
+        lazyFs1.stop();
+
+        Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
+        TestDataCleaner.purgeDirectory(data1);
+        TestDataCleaner.purgeDirectory(data1Lazy);
+        TestDataCleaner.purgeDirectory(data2);
+    }
+
+    @Test
+    void killTest(TestInfo testInfo) throws Exception {
+        var executor = Executors.newFixedThreadPool(2);
+        var barrier = new CyclicBarrier(2);
+        var ret1 = executor.submit(() -> {
+            try {
+                Log.info("Writing to container 1");
+                barrier.await();
+                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        barrier.await();
+        Thread.sleep(1000);
+        Log.info("Killing");
+        lazyFs1.crash();
+
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 5, TimeUnit.SECONDS);
+        Thread.sleep(1000);
+        var client = DockerClientFactory.instance().client();
+        client.killContainerCmd(container1.getContainerId()).exec();
+        container1.stop();
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
+        Log.info("Restart");
+        lazyFs1.start();
+        container1.start();
+        waitingConsumer1 = new WaitingConsumer();
+        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+
+        await().atMost(45, TimeUnit.SECONDS).until(() -> {
+            Log.info("Listing consistency");
+            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+            Log.info(ls1);
+            Log.info(cat1);
+            Log.info(ls2);
+            Log.info(cat2);
+
+            return ls1.equals(ls2) && cat1.equals(cat2);
+        });
+    }
+
+//    @Test
+//    void killTestDirs(TestInfo testInfo) throws Exception {
+//        var executor = Executors.newFixedThreadPool(2);
+//        var barrier = new CyclicBarrier(2);
+//        var ret1 = executor.submit(() -> {
+//            try {
+//                Log.info("Writing to container 1");
+//                barrier.await();
+//                container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
+//            } catch (Exception e) {
+//                throw new RuntimeException(e);
+//            }
+//        });
+//        barrier.await();
+//        Thread.sleep(10000);
+//        var client = DockerClientFactory.instance().client();
+//        client.killContainerCmd(container1.getContainerId()).exec();
+//        container1.stop();
+//        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
+//        container1.start();
+//        waitingConsumer1 = new WaitingConsumer();
+//        var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
+//        container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
+//        waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+//        waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
+//
+//        await().atMost(45, TimeUnit.SECONDS).until(() -> {
+//            Log.info("Listing consistency");
+//            var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+//            var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+//            var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
+//            var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
+//            Log.info(ls1);
+//            Log.info(cat1);
+//            Log.info(ls2);
+//            Log.info(cat2);
+//
+//            return ls1.equals(ls2) && cat1.equals(cat2);
+//        });
+//    }
+}
JDataVersionedWrapperSerializer.java

@@ -24,6 +24,8 @@ public class JDataVersionedWrapperSerializer implements ObjectSerializer<JDataVersionedWrapper> {
     public JDataVersionedWrapper deserialize(ByteString data) {
         var version = data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
         var rawData = data.substring(Long.BYTES);
-        return new JDataVersionedWrapperLazy(version, rawData.size(), () -> dataSerializer.deserialize(rawData));
+        return new JDataVersionedWrapperLazy(version, rawData.size(),
+                () -> dataSerializer.deserialize(rawData)
+        );
     }
 }
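The change above only reformats the constructor call, but it shows the layout this deserializer expects: an 8-byte version prefix followed by the object payload, with the payload decoded lazily. A small standalone sketch of reading that layout with protobuf's ByteString; the helper names are illustrative and the lazy supplier merely stands in for JDataVersionedWrapperLazy, which is not shown in this diff:

```java
import com.google.protobuf.ByteString;

import java.nio.ByteBuffer;
import java.util.function.Supplier;

public class VersionedLayoutSketch {
    // Reads the 8-byte version prefix, the same way the deserializer above does.
    static long readVersion(ByteString data) {
        return data.substring(0, Long.BYTES).asReadOnlyByteBuffer().getLong();
    }

    // Returns the payload lazily; the real code defers dataSerializer.deserialize(rawData) instead.
    static Supplier<ByteString> readLazyPayload(ByteString data) {
        var rawData = data.substring(Long.BYTES);
        return () -> rawData;
    }

    public static void main(String[] args) {
        var versionBuf = ByteBuffer.allocate(Long.BYTES).putLong(42L);
        versionBuf.flip();
        var wire = ByteString.copyFrom(versionBuf).concat(ByteString.copyFromUtf8("object-bytes"));

        System.out.println(readVersion(wire));                          // 42
        System.out.println(readLazyPayload(wire).get().toStringUtf8()); // object-bytes
    }
}
```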
TombstoneMergingKvIterator.java

@@ -1,21 +1,16 @@
 package com.usatiuk.objects.iterators;
 
 import io.quarkus.logging.Log;
-import org.apache.commons.lang3.tuple.Pair;
 
 import java.util.List;
 
-public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
-    private final CloseableKvIterator<K, V> _backing;
-    private final String _name;
-
-    public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
-        _name = name;
-        _backing = new PredicateKvIterator<>(
-                new MergingKvIterator<>(name + "-merging", startType, startKey, iterators),
+public abstract class TombstoneMergingKvIterator {
+    public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, List<IterProdFn<K, MaybeTombstone<V>>> iterators) {
+        return new PredicateKvIterator<K, MaybeTombstone<V>, V>(
+                new MergingKvIterator<K, MaybeTombstone<V>>(name + "-merging", startType, startKey, iterators),
                 startType, startKey,
                 pair -> {
-                    Log.tracev("{0} - Processing pair {1}", _name, pair);
+                    Log.tracev("{0} - Processing pair {1}", name, pair);
                     if (pair instanceof Tombstone<V>) {
                         return null;
                     }
@@ -23,61 +18,7 @@ public class TombstoneMergingKvIterator<K extends Comparable<K>, V> implements CloseableKvIterator<K, V> {
                 });
     }
 
-    @SafeVarargs
-    public TombstoneMergingKvIterator(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
-        this(name, startType, startKey, List.of(iterators));
-    }
-
-    @Override
-    public K peekNextKey() {
-        return _backing.peekNextKey();
-    }
-
-    @Override
-    public void skip() {
-        _backing.skip();
-    }
-
-    @Override
-    public K peekPrevKey() {
-        return _backing.peekPrevKey();
-    }
-
-    @Override
-    public Pair<K, V> prev() {
-        return _backing.prev();
-    }
-
-    @Override
-    public boolean hasPrev() {
-        return _backing.hasPrev();
-    }
-
-    @Override
-    public void skipPrev() {
-        _backing.skipPrev();
-    }
-
-    @Override
-    public void close() {
-        _backing.close();
-    }
-
-    @Override
-    public boolean hasNext() {
-        return _backing.hasNext();
-    }
-
-    @Override
-    public Pair<K, V> next() {
-        return _backing.next();
-    }
-
-    @Override
-    public String toString() {
-        return "TombstoneMergingKvIterator{" +
-                "_backing=" + _backing +
-                ", _name='" + _name + '\'' +
-                '}';
-    }
+    public static <K extends Comparable<K>, V> CloseableKvIterator<K, V> of(String name, IteratorStart startType, K startKey, IterProdFn<K, MaybeTombstone<V>>... iterators) {
+        return of(name, startType, startKey, List.of(iterators));
+    }
 }
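The refactor turns the wrapper class into a static of(...) factory: merge the underlying iterators first, then filter out tombstoned entries. The DHFS iterator interfaces are not part of this diff, so the sketch below only shows the merge-then-drop-tombstones idea over plain collections; every name in it is illustrative rather than the project's API:

```java
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class TombstoneFilterSketch {
    sealed interface MaybeTombstone<V> permits Data, Tombstone {}
    record Data<V>(V value) implements MaybeTombstone<V> {}
    record Tombstone<V>() implements MaybeTombstone<V> {}

    // Analogous in spirit to TombstoneMergingKvIterator.of: merge sources, then drop tombstoned keys.
    static <K extends Comparable<K>, V> Iterator<Map.Entry<K, V>> of(List<Map<K, MaybeTombstone<V>>> sources) {
        var merged = new TreeMap<K, MaybeTombstone<V>>();
        // Later sources lose to earlier ones, i.e. the first source that mentions a key wins.
        for (var source : sources.reversed()) merged.putAll(source);

        var visible = new TreeMap<K, V>();
        for (var e : merged.entrySet()) {
            if (e.getValue() instanceof Data<V> d) visible.put(e.getKey(), d.value()); // tombstones are skipped
        }
        return visible.entrySet().iterator();
    }

    public static void main(String[] args) {
        var newer = Map.<String, MaybeTombstone<String>>of("a", new Data<>("1"), "b", new Tombstone<>());
        var older = Map.<String, MaybeTombstone<String>>of("b", new Data<>("stale"), "c", new Data<>("3"));
        of(List.of(newer, older)).forEachRemaining(System.out::println); // prints a=1 and c=3; b is deleted
    }
}
```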
CachingObjectPersistentStore.java

@@ -186,7 +186,7 @@ public class CachingObjectPersistentStore {
 
         @Override
         public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
-            return new TombstoneMergingKvIterator<>("cache", start, key,
+            return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("cache", start, key,
                     (mS, mK) -> new NavigableMapKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>(_curCache.map(), mS, mK),
                     (mS, mK) -> new CachingKvIterator(_backing.getIterator(start, key)));
         }
LmdbObjectPersistentStore.java

@@ -208,6 +208,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
         // private final Exception _allocationStacktrace = new Exception();
         private final Exception _allocationStacktrace = null;
         private boolean _hasNext = false;
+        private JObjectKey _peekedNextKey = null;
 
         LmdbKvIterator(RefcountedCloseable<Txn<ByteBuffer>> txn, IteratorStart start, JObjectKey key) {
             _txn = txn;
@@ -277,24 +278,24 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
                 }
             }
 
-            var realGot = JObjectKey.fromByteBuffer(_cursor.key());
-            _cursor.key().flip();
-
-            switch (start) {
-                case LT -> {
-                    // assert !_hasNext || realGot.compareTo(key) < 0;
-                }
-                case LE -> {
-                    // assert !_hasNext || realGot.compareTo(key) <= 0;
-                }
-                case GT -> {
-                    assert !_hasNext || realGot.compareTo(key) > 0;
-                }
-                case GE -> {
-                    assert !_hasNext || realGot.compareTo(key) >= 0;
-                }
-            }
-            Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
+//            var realGot = JObjectKey.fromByteBuffer(_cursor.key());
+//            _cursor.key().flip();
+//
+//            switch (start) {
+//                case LT -> {
+////                    assert !_hasNext || realGot.compareTo(key) < 0;
+//                }
+//                case LE -> {
+////                    assert !_hasNext || realGot.compareTo(key) <= 0;
+//                }
+//                case GT -> {
+//                    assert !_hasNext || realGot.compareTo(key) > 0;
+//                }
+//                case GE -> {
+//                    assert !_hasNext || realGot.compareTo(key) >= 0;
+//                }
+//            }
+//            Log.tracev("got: {0}, hasNext: {1}", realGot, _hasNext);
         }
 
         @Override
@@ -323,6 +324,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
                 }
             }
             _goingForward = !_goingForward;
+            _peekedNextKey = null;
         }
 
         @Override
@@ -330,8 +332,12 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
             if (!_hasNext) {
                 throw new NoSuchElementException("No more elements");
            }
+            if (_peekedNextKey != null) {
+                return _peekedNextKey;
+            }
             var ret = JObjectKey.fromByteBuffer(_cursor.key());
             _cursor.key().flip();
+            _peekedNextKey = ret;
             return ret;
         }
 
@@ -341,6 +347,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
                 _hasNext = _cursor.next();
             else
                 _hasNext = _cursor.prev();
+            _peekedNextKey = null;
         }
 
         @Override
@@ -362,6 +369,7 @@ public class LmdbObjectPersistentStore implements ObjectPersistentStore {
             else
                 _hasNext = _cursor.prev();
             Log.tracev("Read: {0}, hasNext: {1}", ret, _hasNext);
+            _peekedNextKey = null;
             return ret;
         }
     }
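The _peekedNextKey additions memoize the key decoded by peekNextKey() and clear the memo whenever the cursor moves or flips direction, so repeated peeks do not re-decode the LMDB key buffer. The same memoization idea in isolation, as a generic peeking wrapper; this is illustrative only, since the real iterator peeks the cursor in place rather than buffering an element:

```java
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

// Fetch the next element once, reuse it for repeated peeks, and drop the memo
// as soon as the iterator actually advances (the role _peekedNextKey plays above).
public class PeekingIterator<T> implements Iterator<T> {
    private final Iterator<T> backing;
    private T peeked = null;

    public PeekingIterator(Iterator<T> backing) {
        this.backing = backing;
    }

    public T peek() {
        if (peeked == null) {
            if (!backing.hasNext()) throw new NoSuchElementException("No more elements");
            peeked = backing.next(); // fetched once, reused by later peek() calls
        }
        return peeked;
    }

    @Override
    public boolean hasNext() {
        return peeked != null || backing.hasNext();
    }

    @Override
    public T next() {
        if (peeked != null) { // the memo is consumed (invalidated) once we advance
            T ret = peeked;
            peeked = null;
            return ret;
        }
        return backing.next();
    }

    public static void main(String[] args) {
        var it = new PeekingIterator<>(List.of("a", "b").iterator());
        System.out.println(it.peek()); // a (fetched once)
        System.out.println(it.peek()); // a (memo reused)
        System.out.println(it.next()); // a (memo cleared)
        System.out.println(it.next()); // b
    }
}
```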
WritebackObjectPersistentStore.java

@@ -350,7 +350,7 @@ public class WritebackObjectPersistentStore {
 
         @Override
         public CloseableKvIterator<JObjectKey, JDataVersionedWrapper> getIterator(IteratorStart start, JObjectKey key) {
-            return new TombstoneMergingKvIterator<JObjectKey, JDataVersionedWrapper>("writeback-ps", start, key,
+            return TombstoneMergingKvIterator.<JObjectKey, JDataVersionedWrapper>of("writeback-ps", start, key,
                     (tS, tK) -> new NavigableMapKvIterator<>(_pendingWrites, tS, tK),
                     (tS, tK) -> (CloseableKvIterator<JObjectKey, MaybeTombstone<JDataVersionedWrapper>>) (CloseableKvIterator<JObjectKey, ?>) _cache.getIterator(tS, tK));
         }
TransactionFactoryImpl.java

@@ -161,7 +161,7 @@ public class TransactionFactoryImpl implements TransactionFactory {
         @Override
         public CloseableKvIterator<JObjectKey, JData> getIterator(IteratorStart start, JObjectKey key) {
             Log.tracev("Getting tx iterator with start={0}, key={1}", start, key);
-            return new ReadTrackingIterator(new TombstoneMergingKvIterator<>("tx", start, key,
+            return new ReadTrackingIterator(TombstoneMergingKvIterator.<JObjectKey, ReadTrackingInternalCrap>of("tx", start, key,
                     (tS, tK) -> new MappingKvIterator<>(new NavigableMapKvIterator<>(_writes, tS, tK),
                             t -> switch (t) {
                                 case TxRecord.TxObjectRecordWrite<?> write ->
@@ -175,6 +175,11 @@ public class TransactionFactoryImpl implements TransactionFactory {
 
         @Override
         public void put(JData obj) {
+            var read = _readSet.get(obj.key());
+            if (read != null && (read.data().map(JDataVersionedWrapper::data).orElse(null) == obj)) {
+                return;
+            }
+
             _writes.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
             _newWrites.put(obj.key(), new TxRecord.TxObjectRecordWrite<>(obj));
         }
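The new put() guard consults the transaction's read set: when the object being written is the very instance that was read, nothing changed and no write record is added. A stripped-down sketch of that identity short-circuit; all names are illustrative, and the real read set holds versioned wrappers rather than bare objects:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class WriteSkipSketch {
    record Obj(String key, String value) {}

    private final Map<String, Optional<Obj>> readSet = new HashMap<>();
    private final Map<String, Obj> writes = new HashMap<>();

    Optional<Obj> get(String key, Map<String, Obj> store) {
        // Record what the transaction observed, like _readSet in the real code.
        return readSet.computeIfAbsent(key, k -> Optional.ofNullable(store.get(k)));
    }

    void put(Obj obj) {
        var read = readSet.get(obj.key());
        // Same identity as what was read, so nothing actually changed: skip the write record.
        if (read != null && read.orElse(null) == obj) return;
        writes.put(obj.key(), obj);
    }

    public static void main(String[] args) {
        var sketch = new WriteSkipSketch();
        var store = Map.of("a", new Obj("a", "1"));
        var readBack = sketch.get("a", store).orElseThrow();
        sketch.put(readBack);          // no-op: identical instance
        sketch.put(new Obj("a", "1")); // recorded: different instance, even if equal
        System.out.println(sketch.writes);
    }
}
```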
pom.xml

@@ -144,6 +144,7 @@
                         </native.image.path>
                         <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
                         <buildDirectory>${project.build.directory}</buildDirectory>
+                        <lazyFsPath>${project.basedir}/../../thirdparty/lazyfs/lazyfs/lazyfs</lazyFsPath>
                         <nativeLibsDirectory>${dhfs.native-libs-dir}</nativeLibsDirectory>
                         <maven.home>${maven.home}</maven.home>
                     </systemPropertyVariables>
DeferredInvalidationQueueService.java

@@ -36,7 +36,11 @@ public class DeferredInvalidationQueueService implements PeerConnectedEventListener {
         Log.info("Initializing with root " + dataRoot);
         if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
             Log.info("Reading invalidation queue");
-            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
+            try {
+                _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
+            } catch (Exception e) {
+                Log.error("Error reading invalidation queue", e);
+            }
         }
     }
 
thirdparty/lazyfs/build.sh (vendored, new executable file): 9 lines added

@@ -0,0 +1,9 @@
+# apt install g++ cmake libfuse3-dev libfuse3-3 fuse3
+
+export CMAKE_BUILD_PARALLEL_LEVEL="$(nproc)"
+
+cd lazyfs
+
+cd libs/libpcache && ./build.sh && cd -
+
+cd lazyfs && ./build.sh && cd -
thirdparty/lazyfs/lazyfs (vendored, submodule): 1 line added

Submodule thirdparty/lazyfs/lazyfs added at 7b137f90ef