diff --git a/dhfs-parent/server-old/.dockerignore b/dhfs-parent/server-old/.dockerignore
deleted file mode 100644
index 94810d00..00000000
--- a/dhfs-parent/server-old/.dockerignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*
-!target/*-runner
-!target/*-runner.jar
-!target/lib/*
-!target/quarkus-app/*
\ No newline at end of file
diff --git a/dhfs-parent/server-old/.gitignore b/dhfs-parent/server-old/.gitignore
deleted file mode 100644
index 8c7863e7..00000000
--- a/dhfs-parent/server-old/.gitignore
+++ /dev/null
@@ -1,43 +0,0 @@
-#Maven
-target/
-pom.xml.tag
-pom.xml.releaseBackup
-pom.xml.versionsBackup
-release.properties
-.flattened-pom.xml
-
-# Eclipse
-.project
-.classpath
-.settings/
-bin/
-
-# IntelliJ
-.idea
-*.ipr
-*.iml
-*.iws
-
-# NetBeans
-nb-configuration.xml
-
-# Visual Studio Code
-.vscode
-.factorypath
-
-# OSX
-.DS_Store
-
-# Vim
-*.swp
-*.swo
-
-# patch
-*.orig
-*.rej
-
-# Local environment
-.env
-
-# Plugin directory
-/.quarkus/cli/plugins/
diff --git a/dhfs-parent/server-old/Dockerfile b/dhfs-parent/server-old/Dockerfile
deleted file mode 100644
index 62bace54..00000000
--- a/dhfs-parent/server-old/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM azul/zulu-openjdk-debian:21-jre-latest
-RUN apt update && apt install -y libfuse2 curl
\ No newline at end of file
diff --git a/dhfs-parent/server-old/docker-compose.yml b/dhfs-parent/server-old/docker-compose.yml
deleted file mode 100644
index a6a0aefa..00000000
--- a/dhfs-parent/server-old/docker-compose.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-version: "3.2"
-
-services:
-  dhfs1:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs1:/dhfs_root
-      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8080:8080
-      - 8081:8443
-      - 5005:5005
-  dhfs2:
-    build: .
-    privileged: true
-    devices:
-      - /dev/fuse
-    volumes:
-      - $HOME/dhfs/dhfs2:/dhfs_root
-      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
-      - ./target/quarkus-app:/app
-    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
-      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
-      -Ddhfs.objects.root=/dhfs_root/d
-      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-      -jar /app/quarkus-run.jar"
-    ports:
-      - 8090:8080
-      - 8091:8443
-      - 5010:5010
diff --git a/dhfs-parent/server-old/pom.xml b/dhfs-parent/server-old/pom.xml
deleted file mode 100644
index bb74c72a..00000000
--- a/dhfs-parent/server-old/pom.xml
+++ /dev/null
@@ -1,209 +0,0 @@
- - 4.0.0 - com.usatiuk.dhfs - server - 1.0.0-SNAPSHOT - - com.usatiuk.dhfs - parent - 1.0-SNAPSHOT - - - - org.testcontainers - testcontainers - test - - org.awaitility - awaitility - test - - com.usatiuk - autoprotomap - 1.0-SNAPSHOT - - com.usatiuk - autoprotomap-deployment - 1.0-SNAPSHOT - provided - - org.bouncycastle - bcprov-jdk18on - 1.78.1 - - org.bouncycastle - bcpkix-jdk18on - 1.78.1 - - io.quarkus - quarkus-security - - net.openhft - zero-allocation-hashing - - io.quarkus - quarkus-grpc - - io.quarkus - quarkus-arc - - io.quarkus - quarkus-rest - - io.quarkus - quarkus-rest-client - - io.quarkus - quarkus-rest-client-jsonb - - io.quarkus - quarkus-rest-jsonb - - io.quarkus - quarkus-scheduler - - io.quarkus - quarkus-junit5 - test - - org.projectlombok - lombok - provided - - com.github.SerCeMan - jnr-fuse - 44ed40f8ce - - com.github.jnr - jnr-ffi - 2.2.16 - - com.github.jnr - jnr-posix - 3.1.19 - - com.github.jnr - jnr-constants - 0.10.4 - - org.apache.commons - commons-lang3 - - commons-io - commons-io - - org.jboss.slf4j - slf4j-jboss-logmanager - test - - commons-codec - commons-codec - - org.apache.commons - commons-collections4 - - org.apache.commons - commons-math3 - 3.6.1 - - com.usatiuk - kleppmanntree - 1.0-SNAPSHOT - - com.usatiuk.dhfs - supportlib - 1.0-SNAPSHOT - - com.usatiuk.dhfs - objects - 1.0-SNAPSHOT - - com.usatiuk.dhfs - utils - 1.0-SNAPSHOT - - - - - org.apache.maven.plugins - maven-surefire-plugin - - 1C - false - classes - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - - true - - - concurrent - - - 0.5 - - true - true - - - - - ${quarkus.platform.group-id} - quarkus-maven-plugin - ${quarkus.platform.version} - true - - - quarkus-plugin - - build - generate-code - generate-code-tests - - - - - - -
diff --git a/dhfs-parent/server-old/src/lombok.config b/dhfs-parent/server-old/src/lombok.config
deleted file mode 100644
index f1c474ce..00000000
--- a/dhfs-parent/server-old/src/lombok.config
+++ /dev/null
@@ -1 +0,0 @@
-lombok.accessors.prefix += _
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm
deleted file mode 100644
index b1de5988..00000000
--- a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm
+++ /dev/null
@@ -1,97 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
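The compose file above starts two peers and hands each its storage roots as -D system properties (dhfs.objects.root, dhfs.fuse.root, and so on). Inside the application these land as MicroProfile Config properties, the same way ShutdownChecker consumes dhfs.objects.root further down in this diff. A minimal sketch of that binding; the RootsLogger bean is hypothetical:

    import io.quarkus.logging.Log;
    import io.quarkus.runtime.StartupEvent;
    import jakarta.enterprise.context.ApplicationScoped;
    import jakarta.enterprise.event.Observes;
    import org.eclipse.microprofile.config.inject.ConfigProperty;

    // Hypothetical bean: shows how the -D flags set in docker-compose
    // (e.g. -Ddhfs.objects.root=/dhfs_root/d) become injected config values.
    @ApplicationScoped
    public class RootsLogger {
        @ConfigProperty(name = "dhfs.objects.root")
        String objectsRoot;

        @ConfigProperty(name = "dhfs.fuse.root")
        String fuseRoot;

        void onStart(@Observes StartupEvent event) {
            Log.info("objects root=" + objectsRoot + ", fuse root=" + fuseRoot);
        }
    }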
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/server-jvm -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/server-jvm -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. 
-# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. (example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-21:1.18 - -ENV LANGUAGE='en_US:en' - - -# We make four distinct layers so if there are application changes the library layers can be re-used -COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ -COPY --chown=185 target/quarkus-app/*.jar /deployments/ -COPY --chown=185 target/quarkus-app/app/ /deployments/app/ -COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] - diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar deleted file mode 100644 index f66a1665..00000000 --- a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar +++ /dev/null @@ -1,93 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode -# -# Before building the container image run: -# -# ./mvnw package -Dquarkus.package.jar.type=legacy-jar -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar -# -# If you want to include the debug port into your docker image -# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. -# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 -# when running the container -# -# Then run the container using : -# -# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar -# -# This image uses the `run-java.sh` script to run the application. -# This scripts computes the command line to execute your Java application, and -# includes memory/GC tuning. -# You can configure the behavior using the following environment properties: -# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") -# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options -# in JAVA_OPTS (example: "-Dsome.property=foo") -# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is -# used to calculate a default maximal heap memory based on a containers restriction. -# If used in a container without any memory constraints for the container then this -# option has no effect. 
If there is a memory constraint then `-Xmx` is set to a ratio -# of the container available memory as set here. The default is `50` which means 50% -# of the available memory is used as an upper boundary. You can skip this mechanism by -# setting this value to `0` in which case no `-Xmx` option is added. -# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This -# is used to calculate a default initial heap memory based on the maximum heap memory. -# If used in a container without any memory constraints for the container then this -# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio -# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` -# is used as the initial heap size. You can skip this mechanism by setting this value -# to `0` in which case no `-Xms` option is added (example: "25") -# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. -# This is used to calculate the maximum value of the initial heap memory. If used in -# a container without any memory constraints for the container then this option has -# no effect. If there is a memory constraint then `-Xms` is limited to the value set -# here. The default is 4096MB which means the calculated value of `-Xms` never will -# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") -# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output -# when things are happening. This option, if set to true, will set -# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). -# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: -# true"). -# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). -# - CONTAINER_CORE_LIMIT: A calculated core limit as described in -# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") -# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). -# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. -# (example: "20") -# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. -# (example: "40") -# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. -# (example: "4") -# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus -# previous GC times. (example: "90") -# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") -# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") -# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should -# contain the necessary JRE command-line options to specify the required GC, which -# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). -# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") -# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") -# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be -# accessed directly. 
(example: "foo.example.com,bar.example.com") -# -### -FROM registry.access.redhat.com/ubi8/openjdk-21:1.18 - -ENV LANGUAGE='en_US:en' - - -COPY target/lib/* /deployments/lib/ -COPY target/*-runner.jar /deployments/quarkus-run.jar - -EXPOSE 8080 -USER 185 -ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" - -ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native b/dhfs-parent/server-old/src/main/docker/Dockerfile.native deleted file mode 100644 index 226e7c71..00000000 --- a/dhfs-parent/server-old/src/main/docker/Dockerfile.native +++ /dev/null @@ -1,27 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native -t quarkus/server . -# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/server -# -### -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro deleted file mode 100644 index 4bd4c6de..00000000 --- a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro +++ /dev/null @@ -1,30 +0,0 @@ -#### -# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. -# It uses a micro base image, tuned for Quarkus native executables. -# It reduces the size of the resulting container image. -# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. -# -# Before building the container image run: -# -# ./mvnw package -Dnative -# -# Then, build the image with: -# -# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server . 
-# -# Then run the container using: -# -# docker run -i --rm -p 8080:8080 quarkus/server -# -### -FROM quay.io/quarkus/quarkus-micro-image:2.0 -WORKDIR /work/ -RUN chown 1001 /work \ - && chmod "g+rwX" /work \ - && chown 1001:root /work -COPY --chown=1001:root target/*-runner /work/application - -EXPOSE 8080 -USER 1001 - -ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java deleted file mode 100644 index 7b275098..00000000 --- a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java +++ /dev/null @@ -1,63 +0,0 @@ -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; - -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.Arrays; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -@ApplicationScoped -public class DeadlockDetector { - private final ExecutorService _executor = Executors.newSingleThreadExecutor(); - - void init(@Observes @Priority(1) StartupEvent event) { - _executor.submit(this::run); - } - - void shutdown(@Observes @Priority(100000) ShutdownEvent event) { - _executor.shutdownNow(); - } - - private void run() { - ThreadMXBean bean = ManagementFactory.getThreadMXBean(); - try { - while (!Thread.interrupted()) { - Thread.sleep(4000); - - long[] threadIds = bean.findDeadlockedThreads(); // Returns null if no threads are deadlocked. - - if (threadIds != null) { - ThreadInfo[] infos = bean.getThreadInfo(threadIds, Integer.MAX_VALUE); - - StringBuilder sb = new StringBuilder(); - - sb.append("Deadlock detected!\n"); - - for (ThreadInfo info : infos) { - StackTraceElement[] stack = info.getStackTrace(); - sb.append(info.getThreadName()).append("\n"); - sb.append("getLockedMonitors: ").append(Arrays.toString(info.getLockedMonitors())).append("\n"); - sb.append("getLockedSynchronizers: ").append(Arrays.toString(info.getLockedSynchronizers())).append("\n"); - sb.append("waiting on: ").append(info.getLockInfo()).append("\n"); - sb.append("locked by: ").append(info.getLockOwnerName()).append("\n"); - sb.append("Stack trace:\n"); - for (var e : stack) { - sb.append(e.toString()).append("\n"); - } - sb.append("==="); - } - - Log.error(sb); - } - } - } catch (InterruptedException e) { - } - Log.info("Deadlock detector thread exiting"); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java deleted file mode 100644 index 69e488c0..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java +++ /dev/null @@ -1,21 +0,0 @@ -package com.usatiuk.dhfs; - -import io.quarkus.runtime.Quarkus; -import io.quarkus.runtime.QuarkusApplication; -import io.quarkus.runtime.annotations.QuarkusMain; - -@QuarkusMain -public class Main { - public static void main(String... args) { - Quarkus.run(DhfsStorageServerApp.class, args); - } - - public static class DhfsStorageServerApp implements QuarkusApplication { - - @Override - public int run(String... 
args) throws Exception { - Quarkus.waitForExit(); - return 0; - } - } -} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java deleted file mode 100644 index dcd379a8..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.usatiuk.dhfs; - -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.io.IOException; -import java.nio.file.Paths; - -@ApplicationScoped -public class ShutdownChecker { - private static final String dataFileName = "running"; - @ConfigProperty(name = "dhfs.objects.root") - String dataRoot; - boolean _cleanShutdown = true; - boolean _initialized = false; - - void init(@Observes @Priority(2) StartupEvent event) throws IOException { - Paths.get(dataRoot).toFile().mkdirs(); - Log.info("Initializing with root " + dataRoot); - if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { - _cleanShutdown = false; - Log.error("Unclean shutdown detected!"); - } else { - Paths.get(dataRoot).resolve(dataFileName).toFile().createNewFile(); - } - _initialized = true; - } - - void shutdown(@Observes @Priority(100000) ShutdownEvent event) throws IOException { - Paths.get(dataRoot).resolve(dataFileName).toFile().delete(); - } - - public boolean lastShutdownClean() { - if (!_initialized) throw new IllegalStateException("ShutdownChecker not initialized"); - return _cleanShutdown; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java deleted file mode 100644 index 0f242e45..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java +++ /dev/null @@ -1,107 +0,0 @@ -package com.usatiuk.dhfs.files.conflicts; - -import com.usatiuk.dhfs.files.objects.Directory; -import com.usatiuk.dhfs.files.objects.FsNode; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import com.usatiuk.dhfs.objects.repository.ObjectHeader; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; - -import java.util.*; - -@ApplicationScoped -public class DirectoryConflictResolver implements ConflictResolver { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Inject - JObjectManager jObjectManager; - - @Override - public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject ours) { - var theirsDir = (Directory) theirsData; - if (!theirsDir.getClass().equals(Directory.class)) { - Log.error("Object type mismatch!"); - throw new NotImplementedException(); - } - - ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, 
oursDirU, bump, invalidate) -> { - if (oursDirU == null) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy")); - if (!(oursDirU instanceof Directory oursDir)) - throw new NotImplementedException("Type conflict for " + ours.getMeta().getName() + ", directory was expected"); - - Directory first; - Directory second; - UUID otherHostname; - - if (oursDir.getMtime() >= theirsDir.getMtime()) { - first = oursDir; - second = theirsDir; - otherHostname = conflictHost; - } else { - second = oursDir; - first = theirsDir; - otherHostname = persistentPeerDataService.getSelfUuid(); - } - - LinkedHashMap mergedChildren = new LinkedHashMap<>(first.getChildren()); - Map newChangelog = new LinkedHashMap<>(m.getChangelog()); - - for (var entry : second.getChildren().entrySet()) { - if (mergedChildren.containsKey(entry.getKey()) && - !Objects.equals(mergedChildren.get(entry.getKey()), entry.getValue())) { - int i = 0; - do { - String name = entry.getKey() + ".conflict." + i + "." + otherHostname; - if (mergedChildren.containsKey(name)) { - i++; - continue; - } - mergedChildren.put(name, entry.getValue()); - break; - } while (true); - } else { - mergedChildren.put(entry.getKey(), entry.getValue()); - } - } - - for (var entry : theirsHeader.getChangelog().getEntriesList()) { - newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max); - } - - boolean wasChanged = oursDir.getChildren().size() != mergedChildren.size() - || oursDir.getMtime() != first.getMtime() - || oursDir.getCtime() != first.getCtime(); - - if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum)) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving")); - - if (wasChanged) { - newChangelog.merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum); - - for (var child : mergedChildren.values()) { - if (!(new HashSet<>(oursDir.getChildren().values()).contains(child))) { - jObjectManager.getOrPut(child.toString(), FsNode.class, Optional.of(oursDir.getName())); - } - } - - oursDir.setMtime(first.getMtime()); - oursDir.setCtime(first.getCtime()); - - oursDir.setChildren(mergedChildren); - } - - m.setChangelog(newChangelog); - return null; - }); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java deleted file mode 100644 index 4610c4b5..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java +++ /dev/null @@ -1,182 +0,0 @@ -package com.usatiuk.dhfs.files.conflicts; - -import com.usatiuk.dhfs.files.objects.ChunkData; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.files.service.DhfsFileService; -import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import com.usatiuk.dhfs.objects.repository.ObjectHeader; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.kleppmanntree.AlreadyExistsException; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import 
jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.commons.lang3.tuple.Pair; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; - -@ApplicationScoped -public class FileConflictResolver implements ConflictResolver { - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - DhfsFileService fileService; - @Inject - JKleppmannTreeManager jKleppmannTreeManager; - @Inject - JObjectManager jObjectManager; - - @ConfigProperty(name = "dhfs.files.use_hash_for_chunks") - boolean useHashForChunks; - - // FIXME: There might be a race where node with conflict deletes a file, and we answer that - // it can do it as we haven't recorded the received file in the object model yet - @Override - public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject ours) { - var theirsFile = (File) theirsData; - if (!theirsFile.getClass().equals(File.class)) { - Log.error("Object type mismatch!"); - throw new NotImplementedException(); - } - - var newJFile = ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursFileU, bumpFile, invalidateFile) -> { - if (oursFileU == null) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy")); - if (!(oursFileU instanceof File oursFile)) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Bad type for file")); - - File first; - File second; - UUID otherHostname; - - if (oursFile.getMtime() >= theirsFile.getMtime()) { - first = oursFile; - second = theirsFile; - otherHostname = conflictHost; - } else { - second = oursFile; - first = theirsFile; - otherHostname = persistentPeerDataService.getSelfUuid(); - } - - Map newChangelog = new LinkedHashMap<>(m.getChangelog()); - - for (var entry : theirsHeader.getChangelog().getEntriesList()) { - newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max); - } - - boolean chunksDiff = !Objects.equals(first.getChunks(), second.getChunks()); - - var firstChunksCopy = first.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - var secondChunksCopy = second.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - - boolean wasChanged = oursFile.getMtime() != first.getMtime() - || oursFile.getCtime() != first.getCtime() - || first.isSymlink() != second.isSymlink() - || chunksDiff; - - if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum)) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving")); - - m.setChangelog(newChangelog); - - if (wasChanged) { - m.getChangelog().merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum); - - if (useHashForChunks) - throw new NotImplementedException(); - - HashSet oursBackup = new HashSet<>(oursFile.getChunks().values()); - oursFile.getChunks().clear(); - - for (var e : firstChunksCopy) { - oursFile.getChunks().put(e.getLeft(), e.getValue()); - jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(oursFile.getName())); - } - HashSet oursNew = new HashSet<>(oursFile.getChunks().values()); - - oursFile.setMtime(first.getMtime()); - oursFile.setCtime(first.getCtime()); - - var newFile = new File(UUID.randomUUID(), second.getMode(), second.isSymlink()); - - newFile.setMtime(second.getMtime()); - newFile.setCtime(second.getCtime()); - - for (var e : secondChunksCopy) { - 
newFile.getChunks().put(e.getLeft(), e.getValue()); - jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(newFile.getName())); - } - - fileService.updateFileSize((JObject) ours); - - var ret = jObjectManager.putLocked(newFile, Optional.empty()); - - fileService.updateFileSize((JObject) ret); - - try { - for (var cuuid : oursBackup) { - if (!oursNew.contains(cuuid)) - jObjectManager - .get(cuuid) - .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, d, b, v) -> { - mc.removeRef(oursFile.getName()); - return null; - })); - } - } catch (Exception e) { - ret.getMeta().unfreeze(); - ret.rwUnlock(); - return null; - } - return ret; - } - - return null; - }); - - if (newJFile == null) return; - boolean locked = true; - - // FIXME: Slow and what happens if a directory is deleted? - try { - var parent = fileService.inoToParent(ours.getMeta().getName()); - // FIXME? - var tree = jKleppmannTreeManager.getTree("fs"); - - var nodeId = tree.getNewNodeId(); - newJFile.getMeta().addRef(nodeId); - newJFile.getMeta().unfreeze(); - newJFile.rwUnlock(); - locked = false; - - int i = 0; - - do { - try { - tree.move(parent.getRight(), new JKleppmannTreeNodeMetaFile(parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + conflictHost + "." + i, newJFile.getMeta().getName()), nodeId); - } catch (AlreadyExistsException aex) { - i++; - continue; - } - break; - } while (true); - } catch (Exception e) { - Log.error("Error when creating new file for " + ours.getMeta().getName(), e); - } finally { - if (locked) { - newJFile.getMeta().unfreeze(); - newJFile.getMeta().getReferrersMutable().clear(); - newJFile.rwUnlock(); - } - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java deleted file mode 100644 index 22a429b7..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.usatiuk.dhfs.files.conflicts; - -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import com.usatiuk.dhfs.objects.repository.ObjectHeader; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import jakarta.enterprise.context.ApplicationScoped; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Objects; -import java.util.UUID; - -@ApplicationScoped -public class NoOpConflictResolver implements ConflictResolver { - @Override - public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject ours) { - ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, i) -> { - if (d == null) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy")); - - if (!Objects.equals(theirsData.getClass(), ours.getData().getClass())) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Type conflict for object " + m.getName() - + " ours: " + ours.getData().getClass() + " theirs: " + theirsData.getClass())); - - if (!Objects.equals(theirsData, ours.getData())) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict for immutable object " + m.getName())); - - Map newChangelog = 
new LinkedHashMap<>(m.getChangelog()); - - for (var entry : theirsHeader.getChangelog().getEntriesList()) - newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max); - - if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum)) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving")); - - m.setChangelog(newChangelog); - - return null; - }); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java deleted file mode 100644 index 46f8e283..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java +++ /dev/null @@ -1,90 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.Leaf; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import net.openhft.hashing.LongTupleHashFunction; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -@AssumedUnique -@Leaf -public class ChunkData extends JObjectData { - final ChunkDataP _data; - - public ChunkData(ByteString bytes) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - // TODO: There might be (most definitely) a copy there - .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer())) - .mapToObj(Long::toHexString).collect(Collectors.joining())) - .build(); - } - - public ChunkData(ByteString bytes, String name) { - super(); - _data = ChunkDataP.newBuilder() - .setData(bytes) - .setName(name) - .build(); - } - - public ChunkData(ChunkDataP chunkDataP) { - super(); - _data = chunkDataP; - } - - ChunkDataP getData() { - return _data; - } - - public ByteString getBytes() { - return _data.getData(); - } - - public int getSize() { - return _data.getData().size(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ChunkData chunkData = (ChunkData) o; - return Objects.equals(getName(), chunkData.getName()); - } - - @Override - public int hashCode() { - return Objects.hashCode(getName()); - } - - @Override - public String getName() { - return _data.getName(); - } - - @Override - public Class getConflictResolver() { - return NoOpConflictResolver.class; - } - - @Override - public Collection extractRefs() { - return List.of(); - } - - @Override - public int estimateSize() { - return _data.getData().size(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java deleted file mode 100644 index 64532d56..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import jakarta.inject.Singleton; - -@Singleton -public class ChunkDataSerializer implements 
ProtoSerializer { - @Override - public ChunkData deserialize(ChunkDataP message) { - return new ChunkData(message); - } - - @Override - public ChunkDataP serialize(ChunkData object) { - return object.getData(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java deleted file mode 100644 index e4aa578f..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.dhfs.files.conflicts.DirectoryConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import lombok.Getter; -import lombok.Setter; - -import java.io.Serial; -import java.util.*; - -public class Directory extends FsNode { - @Serial - private static final long serialVersionUID = 1; - @Getter - @Setter - private Map _children = new HashMap<>(); - - public Directory(UUID uuid) { - super(uuid); - } - - public Directory(UUID uuid, long mode) { - super(uuid, mode); - } - - @Override - public Class getConflictResolver() { - return DirectoryConflictResolver.class; - } - - public Optional getKid(String name) { - return Optional.ofNullable(_children.get(name)); - } - - public boolean removeKid(String name) { - return _children.remove(name) != null; - } - - public boolean putKid(String name, UUID uuid) { - if (_children.containsKey(name)) return false; - - _children.put(name, uuid); - return true; - } - - @Override - public Class getRefType() { - return FsNode.class; - } - - @Override - public Collection extractRefs() { - return _children.values().stream().map(UUID::toString).toList(); - } - - public List getChildrenList() { - return _children.keySet().stream().toList(); - } - - @Override - public int estimateSize() { - return _children.size() * 192; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java deleted file mode 100644 index bec56829..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.DirectoryP; -import com.usatiuk.dhfs.objects.persistence.FsNodeP; -import jakarta.inject.Singleton; - -import java.util.Map; -import java.util.UUID; -import java.util.stream.Collectors; - -@Singleton -public class DirectorySerializer implements ProtoSerializer { - @Override - public Directory deserialize(DirectoryP message) { - var ret = new Directory(UUID.fromString(message.getFsNode().getUuid())); - ret.setMtime(message.getFsNode().getMtime()); - ret.setCtime(message.getFsNode().getCtime()); - ret.setMode(message.getFsNode().getMode()); - ret.getChildren().putAll(message.getChildrenMap().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> UUID.fromString(e.getValue())))); - return ret; - } - - @Override - public DirectoryP serialize(Directory object) { - return DirectoryP.newBuilder() - .setFsNode(FsNodeP.newBuilder() - .setCtime(object.getCtime()) - .setMtime(object.getMtime()) - .setMode(object.getMode()) - .setUuid(object.getUuid().toString()) - .build()) - 
.putAllChildren(object.getChildren().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString()))) - .build(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java deleted file mode 100644 index 0c6fa4e8..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.dhfs.files.conflicts.FileConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import lombok.Getter; -import lombok.Setter; - -import java.util.*; - -public class File extends FsNode { - @Getter - private final NavigableMap _chunks; - @Getter - private final boolean _symlink; - @Getter - @Setter - private long _size = 0; - - public File(UUID uuid, long mode, boolean symlink) { - super(uuid, mode); - _symlink = symlink; - _chunks = new TreeMap<>(); - } - - public File(UUID uuid, long mode, boolean symlink, NavigableMap chunks) { - super(uuid, mode); - _symlink = symlink; - _chunks = chunks; - } - - @Override - public Class getConflictResolver() { - return FileConflictResolver.class; - } - - @Override - public Class getRefType() { - return ChunkData.class; - } - - @Override - public Collection extractRefs() { - return Collections.unmodifiableCollection(_chunks.values()); - } - - @Override - public int estimateSize() { - return _chunks.size() * 192; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java deleted file mode 100644 index 510cefd3..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.FileP; -import com.usatiuk.dhfs.objects.persistence.FsNodeP; -import jakarta.inject.Singleton; - -import java.util.TreeMap; -import java.util.UUID; - -@Singleton -public class FileSerializer implements ProtoSerializer { - @Override - public File deserialize(FileP message) { - TreeMap chunks = new TreeMap<>(); - message.getChunksList().forEach(chunk -> { - chunks.put(chunk.getStart(), chunk.getId()); - }); - var ret = new File(UUID.fromString(message.getFsNode().getUuid()), - message.getFsNode().getMode(), - message.getSymlink(), - chunks - ); - ret.setMtime(message.getFsNode().getMtime()); - ret.setCtime(message.getFsNode().getCtime()); - ret.setSize(message.getSize()); - return ret; - } - - @Override - public FileP serialize(File object) { - var builder = FileP.newBuilder() - .setFsNode(FsNodeP.newBuilder() - .setCtime(object.getCtime()) - .setMtime(object.getMtime()) - .setMode(object.getMode()) - .setUuid(object.getUuid().toString()) - .build()) - .setSymlink(object.isSymlink()) - .setSize(object.getSize()); - object.getChunks().forEach((s, i) -> { - builder.addChunksBuilder().setStart(s).setId(i); - }); - return builder.build(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java deleted file mode 100644 index a6e6ac14..00000000 --- 
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.usatiuk.dhfs.files.objects; - -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import lombok.Getter; -import lombok.Setter; - -import java.io.Serial; -import java.util.UUID; - -public abstract class FsNode extends JObjectData { - @Serial - private static final long serialVersionUID = 1; - - @Getter - final UUID _uuid; - @Getter - @Setter - private long _mode; - @Getter - @Setter - private long _ctime; - @Getter - @Setter - private long _mtime; - - protected FsNode(UUID uuid) { - this._uuid = uuid; - this._ctime = System.currentTimeMillis(); - this._mtime = this._ctime; - } - - protected FsNode(UUID uuid, long mode) { - this._uuid = uuid; - this._mode = mode; - this._ctime = System.currentTimeMillis(); - this._mtime = this._ctime; - } - - @Override - public String getName() { - return _uuid.toString(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java deleted file mode 100644 index 58678dd2..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -import com.google.protobuf.ByteString; -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.Optional; - -public interface DhfsFileService { - Optional open(String name); - - Optional create(String name, long mode); - - Pair inoToParent(String ino); - - void mkdir(String name, long mode); - - Optional getattr(String name); - - Boolean chmod(String name, long mode); - - void unlink(String name); - - Boolean rename(String from, String to); - - Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs); - - Iterable readDir(String name); - - void updateFileSize(JObject file); - - Long size(String f); - - Optional read(String fileUuid, long offset, int length); - - Long write(String fileUuid, long offset, ByteString data); - - default Long write(String fileUuid, long offset, byte[] data) { - return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data)); - } - - Boolean truncate(String fileUuid, long length); - - String readlink(String uuid); - - ByteString readlinkBS(String uuid); - - String symlink(String oldpath, String newpath); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java deleted file mode 100644 index 33b30d85..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java +++ /dev/null @@ -1,814 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -import com.google.protobuf.ByteString; -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.files.objects.ChunkData; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.files.objects.FsNode; -import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import 
com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.jrepository.JMutator; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.tuple.Pair; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Path; -import java.util.*; -import java.util.stream.StreamSupport; - -@ApplicationScoped -public class DhfsFileServiceImpl implements DhfsFileService { - @Inject - JObjectManager jObjectManager; - @Inject - JObjectTxManager jObjectTxManager; - - @ConfigProperty(name = "dhfs.files.target_chunk_size") - int targetChunkSize; - - @ConfigProperty(name = "dhfs.files.write_merge_threshold") - float writeMergeThreshold; - - @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take") - float writeMergeMaxChunkToTake; - - @ConfigProperty(name = "dhfs.files.write_merge_limit") - float writeMergeLimit; - - @ConfigProperty(name = "dhfs.files.write_last_chunk_limit") - float writeLastChunkLimit; - - @ConfigProperty(name = "dhfs.files.use_hash_for_chunks") - boolean useHashForChunks; - - @ConfigProperty(name = "dhfs.files.allow_recursive_delete") - boolean allowRecursiveDelete; - - @ConfigProperty(name = "dhfs.objects.ref_verification") - boolean refVerification; - - @ConfigProperty(name = "dhfs.objects.write_log") - boolean writeLogging; - - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - JKleppmannTreeManager jKleppmannTreeManager; - - private JKleppmannTreeManager.JKleppmannTree _tree; - - private ChunkData createChunk(ByteString bytes) { - if (useHashForChunks) { - return new ChunkData(bytes); - } else { - return new ChunkData(bytes, persistentPeerDataService.getUniqueId()); - } - } - - void init(@Observes @Priority(500) StartupEvent event) { - Log.info("Initializing file service"); - _tree = jKleppmannTreeManager.getTree("fs"); - } - - private JObject getDirEntry(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); - if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return (JObject) ret; - } - - private Optional> getDirEntryOpt(String name) { - var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList()); - if (res == null) return Optional.empty(); - var ret = jObjectManager.get(res).orElseThrow(() -> new 
StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name))); - if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class)) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name)); - return Optional.of((JObject) ret); - } - - @Override - public Optional getattr(String uuid) { - return jObjectTxManager.executeTx(() -> { - var ref = jObjectManager.get(uuid); - if (ref.isEmpty()) return Optional.empty(); - return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - GetattrRes ret; - if (d instanceof File f) { - ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? GetattrType.SYMLINK : GetattrType.FILE); - } else if (d instanceof JKleppmannTreeNode) { - ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY); - } else { - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - } - return Optional.of(ret); - }); - }); - } - - @Override - public Optional open(String name) { - return jObjectTxManager.executeTx(() -> { - try { - var ret = getDirEntry(name); - return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno(); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName(); - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - })); - } catch (StatusRuntimeException e) { - if (e.getStatus().getCode() == Status.Code.NOT_FOUND) { - return Optional.empty(); - } - throw e; - } - }); - } - - private void ensureDir(JObject entry) { - entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory")); - else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null; - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName())); - }); - } - - @Override - public Optional create(String name, long mode) { - return jObjectTxManager.executeTx(() -> { - Path path = Path.of(name); - var parent = getDirEntry(path.getParent().toString()); - - ensureDir(parent); - - String fname = path.getFileName().toString(); - - var fuuid = UUID.randomUUID(); - Log.debug("Creating file " + fuuid); - File f = new File(fuuid, mode, false); - - var newNodeId = _tree.getNewNodeId(); - var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId)); - try { - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); - } catch (Exception e) { - fobj.getMeta().removeRef(newNodeId); - throw e; - } finally { - fobj.rwUnlock(); - } - return Optional.of(f.getName()); - }); - } - - //FIXME: Slow.. 
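getDirEntry above resolves a filesystem path by splitting it into name components with java.nio.Path and handing the list to the Kleppmann tree's traverse. The splitting idiom in isolation, as a sketch:

    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.StreamSupport;

    // Mirrors the component split in getDirEntry: "/a/b/c" -> ["a", "b", "c"],
    // which is then walked by _tree.traverse() to find the node id.
    class PathComponents {
        static List<String> split(String name) {
            return StreamSupport.stream(Path.of(name).spliterator(), false)
                    .map(Path::toString)
                    .toList();
        }
    }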
- @Override - public Pair inoToParent(String ino) { - return jObjectTxManager.executeTx(() -> { - return _tree.findParent(w -> { - if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) - if (f.getFileIno().equals(ino)) - return true; - return false; - }); - }); - } - - @Override - public void mkdir(String name, long mode) { - jObjectTxManager.executeTx(() -> { - Path path = Path.of(name); - var parent = getDirEntry(path.getParent().toString()); - ensureDir(parent); - - String dname = path.getFileName().toString(); - - Log.debug("Creating directory " + name); - - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId()); - }); - } - - @Override - public void unlink(String name) { - jObjectTxManager.executeTx(() -> { - var node = getDirEntryOpt(name).orElse(null); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) - if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException(); - return d.getNode().getMeta(); - }); - - _tree.trash(meta, node.getMeta().getName()); - }); - } - - @Override - public Boolean rename(String from, String to) { - return jObjectTxManager.executeTx(() -> { - var node = getDirEntry(from); - JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta()); - - var toPath = Path.of(to); - var toDentry = getDirEntry(toPath.getParent().toString()); - ensureDir(toDentry); - - _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName()); - - return true; - }); - } - - @Override - public Boolean chmod(String uuid, long mode) { - return jObjectTxManager.executeTx(() -> { - var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND)); - - dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> { - if (d instanceof JKleppmannTreeNode) { - return null;//FIXME:? 
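The conflict resolvers earlier in this diff (NoOpConflictResolver, DirectoryConflictResolver, FileConflictResolver) all share one replication step: merge the per-host changelog entry-wise with Long::max, treat a local best version above the merged sum as a lost race, and count a changed resolution result as one more local edit. A standalone sketch of that merge with simplified types (the real code reads the remote changelog from the protobuf ObjectHeader and object metadata):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.UUID;

    // Simplified version-vector merge shared by the conflict resolvers.
    class ChangelogMerge {
        static Map<UUID, Long> merge(Map<UUID, Long> ours, Map<UUID, Long> theirs,
                                     UUID self, long ourBestVersion, boolean changed) {
            Map<UUID, Long> merged = new HashMap<>(ours);
            // Keep the highest version seen for every host.
            theirs.forEach((host, version) -> merged.merge(host, version, Long::max));
            // A local best version above the merged total means another update
            // raced with this resolution; the caller aborts and retries.
            long total = merged.values().stream().reduce(0L, Long::sum);
            if (ourBestVersion > total)
                throw new IllegalStateException("Race when conflict resolving");
            // A changed resolution result counts as one more local edit.
            if (changed)
                merged.merge(self, 1L, Long::sum);
            return merged;
        }
    }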
- } else if (d instanceof File f) { - bump.apply(); - f.setMtime(System.currentTimeMillis()); - f.setMode(mode); - } else { - throw new IllegalArgumentException(uuid + " is not a file"); - } - return null; - }); - - return true; - }); - } - - @Override - public Iterable readDir(String name) { - return jObjectTxManager.executeTx(() -> { - var found = getDirEntry(name); - - return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - return new ArrayList<>(d.getNode().getChildren().keySet()); - }); - }); - } - - @Override - public Optional read(String fileUuid, long offset, int length) { - return jObjectTxManager.executeTx(() -> { - if (length < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - if (offset < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - - var fileOpt = jObjectManager.get(fileUuid); - if (fileOpt.isEmpty()) { - Log.error("File not found when trying to read: " + fileUuid); - return Optional.empty(); - } - var file = fileOpt.get(); - - try { - return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - var chunksAll = ((File) fileData).getChunks(); - if (chunksAll.isEmpty()) { - return Optional.of(ByteString.empty()); - } - var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet(); - - if (chunksList.isEmpty()) { - return Optional.of(ByteString.empty()); - } - - var chunks = chunksList.iterator(); - ByteString buf = ByteString.empty(); - - long curPos = offset; - var chunk = chunks.next(); - - while (curPos < offset + length) { - var chunkPos = chunk.getKey(); - - long offInChunk = curPos - chunkPos; - - long toReadInChunk = (offset + length) - curPos; - - var chunkBytes = readChunk(chunk.getValue()); - - long readableLen = chunkBytes.size() - offInChunk; - - var toReadReally = Math.min(readableLen, toReadInChunk); - - if (toReadReally < 0) break; - - buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally))); - - curPos += toReadReally; - - if (readableLen > toReadInChunk) - break; - - if (!chunks.hasNext()) break; - - chunk = chunks.next(); - } - - // FIXME: - return Optional.of(buf); - }); - } catch (Exception e) { - Log.error("Error reading file: " + fileUuid, e); - return Optional.empty(); - } - }); - } - - private ByteString readChunk(String uuid) { - var chunkRead = jObjectManager.get(uuid).orElse(null); - - if (chunkRead == null) { - Log.error("Chunk requested not found: " + uuid); - throw new StatusRuntimeException(Status.NOT_FOUND); - } - - return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> { - if (!(d instanceof ChunkData cd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - return cd.getBytes(); - }); - } - - private int getChunkSize(String uuid) { - return readChunk(uuid).size(); - } - - private void cleanupChunks(File f, Collection uuids) { - // FIXME: - var inFile = useHashForChunks ? 
new HashSet<>(f.getChunks().values()) : Collections.emptySet(); - for (var cuuid : uuids) { - try { - if (inFile.contains(cuuid)) continue; - jObjectManager.get(cuuid) - .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, - (m, d, b, v) -> { - m.removeRef(f.getName()); - return null; - })); - } catch (Exception e) { - Log.error("Error when cleaning chunk " + cuuid, e); - } - } - } - - @Override - public Long write(String fileUuid, long offset, ByteString data) { - return jObjectTxManager.executeTx(() -> { - if (offset < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset)); - - // FIXME: - var file = (JObject) jObjectManager.get(fileUuid).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); - return -1L; - } - - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - // FIXME: - if (!(file.getData() instanceof File)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - if (writeLogging) { - Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " " - + offset + " " + data.size()); - } - - if (size(fileUuid) < offset) - truncate(fileUuid, offset); - - // FIXME: Some kind of immutable interface? - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - var first = chunksAll.floorEntry(offset); - var last = chunksAll.lowerEntry(offset + data.size()); - NavigableMap removedChunks = new TreeMap<>(); - - long start = 0; - - NavigableMap beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap(); - NavigableMap afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap(); - - if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) { - beforeFirst = chunksAll; - afterLast = Collections.emptyNavigableMap(); - first = null; - last = null; - start = offset; - } else if (!chunksAll.isEmpty()) { - var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true); - removedChunks.putAll(between); - start = first.getKey(); - } - - ByteString pendingWrites = ByteString.empty(); - - if (first != null && first.getKey() < offset) { - var chunkBytes = readChunk(first.getValue()); - pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey()))); - } - pendingWrites = pendingWrites.concat(data); - - if (last != null) { - var lchunkBytes = readChunk(last.getValue()); - if (last.getKey() + lchunkBytes.size() > offset + data.size()) { - var startInFile = offset + data.size(); - var startInChunk = startInFile - last.getKey(); - pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size())); - } - } - - int combinedSize = pendingWrites.size(); - - if (targetChunkSize > 0) { - if (combinedSize < (targetChunkSize * writeMergeThreshold)) { - boolean leftDone = false; - boolean rightDone = false; - while (!leftDone && !rightDone) { - if (beforeFirst.isEmpty()) leftDone = true; - if (!beforeFirst.isEmpty() || !leftDone) { - var takeLeft = beforeFirst.lastEntry(); - - var cuuid = takeLeft.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - leftDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - leftDone = true; - continue; - } - - // FIXME: (and test this) - beforeFirst 
= beforeFirst.headMap(takeLeft.getKey(), false); - start = takeLeft.getKey(); - pendingWrites = readChunk(cuuid).concat(pendingWrites); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeLeft.getKey(), takeLeft.getValue()); - } - if (afterLast.isEmpty()) rightDone = true; - if (!afterLast.isEmpty() && !rightDone) { - var takeRight = afterLast.firstEntry(); - - var cuuid = takeRight.getValue(); - - if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) { - rightDone = true; - continue; - } - - if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) { - rightDone = true; - continue; - } - - // FIXME: (and test this) - afterLast = afterLast.tailMap(takeRight.getKey(), false); - pendingWrites = pendingWrites.concat(readChunk(cuuid)); - combinedSize += getChunkSize(cuuid); - removedChunks.put(takeRight.getKey(), takeRight.getValue()); - } - } - } - } - - NavigableMap newChunks = new TreeMap<>(); - - { - int cur = 0; - while (cur < combinedSize) { - int end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) { - end = Math.min(cur + targetChunkSize, combinedSize); - } else { - end = combinedSize; - } - } - - var thisChunk = pendingWrites.substring(cur, end); - - ChunkData newChunkData = createChunk(thisChunk); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += thisChunk.size(); - cur = end; - } - } - - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); - - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); - } finally { - file.rwUnlock(); - } - - return (long) data.size(); - }); - } - - @Override - public Boolean truncate(String fileUuid, long length) { - return jObjectTxManager.executeTx(() -> { - if (length < 0) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length)); - - var file = (JObject) jObjectManager.get(fileUuid).orElse(null); - if (file == null) { - Log.error("File not found when trying to read: " + fileUuid); - return false; - } - - if (length == 0) { - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - - var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks())); - - file.mutate(new JMutator<>() { - long oldMtime; - - @Override - public boolean mutate(File object) { - oldMtime = object.getMtime(); - object.getChunks().clear(); - return true; - } - - @Override - public void revert(File object) { - object.setMtime(oldMtime); - object.getChunks().putAll(oldChunks); - } - }); - cleanupChunks(file.getData(), oldChunks.values()); - updateFileSize((JObject) file); - } catch (Exception e) { - Log.error("Error writing file chunks: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); - } - return true; - } - - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - - var curSize = size(fileUuid); - if (curSize == length) return true; - - var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks()); - NavigableMap removedChunks = new TreeMap<>(); - NavigableMap newChunks = new TreeMap<>(); - - if (curSize < length) { - long combinedSize = (length - curSize); - - long start = curSize; - - // Hack - HashMap zeroCache = new HashMap<>(); - - { - long cur = 0; - 
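- // Zero-fill on grow: the loop below materializes the (length - curSize)
- // gap as chunks of roughly targetChunkSize, reusing identical zero
- // buffers through zeroCache so each distinct size is allocated once.
- // E.g. growing by 2.5 * targetChunkSize yields one full chunk plus one
- // 1.5x tail chunk: the 1.5 factor check avoids emitting a tiny trailer.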
while (cur < combinedSize) { - long end; - - if (targetChunkSize <= 0) - end = combinedSize; - else { - if ((combinedSize - cur) > (targetChunkSize * 1.5)) { - end = cur + targetChunkSize; - } else { - end = combinedSize; - } - } - - if (!zeroCache.containsKey(end - cur)) - zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])); - - ChunkData newChunkData = createChunk(zeroCache.get(end - cur)); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(start, newChunkData.getName()); - - start += newChunkData.getSize(); - cur = end; - } - } - } else { - var tail = chunksAll.lowerEntry(length); - var afterTail = chunksAll.tailMap(tail.getKey(), false); - - removedChunks.put(tail.getKey(), tail.getValue()); - removedChunks.putAll(afterTail); - - var tailBytes = readChunk(tail.getValue()); - var newChunk = tailBytes.substring(0, (int) (length - tail.getKey())); - - ChunkData newChunkData = createChunk(newChunk); - //FIXME: - jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName())); - newChunks.put(tail.getKey(), newChunkData.getName()); - } - - file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks)); - - cleanupChunks(file.getData(), removedChunks.values()); - updateFileSize((JObject) file); - return true; - } catch (Exception e) { - Log.error("Error reading file: " + fileUuid, e); - return false; - } finally { - file.rwUnlock(); - } - }); - } - - @Override - public String readlink(String uuid) { - return jObjectTxManager.executeTx(() -> { - return readlinkBS(uuid).toStringUtf8(); - }); - } - - @Override - public ByteString readlinkBS(String uuid) { - return jObjectTxManager.executeTx(() -> { - var fileOpt = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid))); - - return fileOpt.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> { - if (!(fileData instanceof File)) { - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - } - - if (!((File) fileData).isSymlink()) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid)); - - return read(uuid, 0, Math.toIntExact(size(uuid))).get(); - }); - }); - } - - @Override - public String symlink(String oldpath, String newpath) { - return jObjectTxManager.executeTx(() -> { - Path path = Path.of(newpath); - var parent = getDirEntry(path.getParent().toString()); - - ensureDir(parent); - - String fname = path.getFileName().toString(); - - var fuuid = UUID.randomUUID(); - Log.debug("Creating file " + fuuid); - - File f = new File(fuuid, 0, true); - var newNodeId = _tree.getNewNodeId(); - ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8))); - - f.getChunks().put(0L, newChunkData.getName()); - - jObjectManager.put(newChunkData, Optional.of(f.getName())); - var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId)); - try { - updateFileSize(newFile); - } finally { - newFile.rwUnlock(); - } - - _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId); - return f.getName(); - }); - } - - @Override - public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) { - return jObjectTxManager.executeTx(() -> { - var file = jObjectManager.get(fileUuid).orElseThrow( - () -> new 
StatusRuntimeException(Status.NOT_FOUND.withDescription( - "File not found for setTimes: " + fileUuid)) - ); - - file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> { - if (fileData instanceof JKleppmannTreeNode) return null; // FIXME: - if (!(fileData instanceof FsNode fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - bump.apply(); - fd.setMtime(mtimeMs); - return null; - }); - - return true; - }); - } - - @Override - public void updateFileSize(JObject file) { - jObjectTxManager.executeTx(() -> { - file.rwLockNoCopy(); - try { - file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - if (!(file.getData() instanceof File fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - long realSize = 0; - - var last = fd.getChunks().lastEntry(); - if (last != null) { - var lastSize = getChunkSize(last.getValue()); - realSize = last.getKey() + lastSize; - } - - if (realSize != fd.getSize()) { - long finalRealSize = realSize; - file.mutate(new JMutator() { - long oldSize; - - @Override - public boolean mutate(File object) { - oldSize = object.getSize(); - object.setSize(finalRealSize); - return true; - } - - @Override - public void revert(File object) { - object.setSize(oldSize); - } - }); - } - } catch (Exception e) { - Log.error("Error updating file size: " + file.getMeta().getName(), e); - } finally { - file.rwUnlock(); - } - }); - } - - @Override - public Long size(String uuid) { - return jObjectTxManager.executeTx(() -> { - var read = jObjectManager.get(uuid) - .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); - - try { - return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> { - if (!(fileData instanceof File fd)) - throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - return fd.getSize(); - }); - } catch (Exception e) { - Log.error("Error reading file: " + uuid, e); - return -1L; - } - }); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java deleted file mode 100644 index f13096f9..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -public class DirectoryNotEmptyException extends RuntimeException { - @Override - public synchronized Throwable fillInStackTrace() { - return this; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java deleted file mode 100644 index 3b31cdae..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jrepository.JMutator; - -import java.util.NavigableMap; - -public class FileChunkMutator implements JMutator { - private final long _oldTime; - private final long _newTime; - private final NavigableMap _removedChunks; - private final NavigableMap _newChunks; - - public FileChunkMutator(long oldTime, long newTime, NavigableMap removedChunks, NavigableMap newChunks) { - _oldTime = oldTime; - _newTime = newTime; - _removedChunks = removedChunks; - _newChunks = newChunks; - } - - @Override - public 
boolean mutate(File object) { - object.setMtime(_newTime); - object.getChunks().keySet().removeAll(_removedChunks.keySet()); - object.getChunks().putAll(_newChunks); - return true; - } - - @Override - public void revert(File object) { - object.setMtime(_oldTime); - object.getChunks().keySet().removeAll(_newChunks.keySet()); - object.getChunks().putAll(_removedChunks); - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java deleted file mode 100644 index 3240a6b4..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java deleted file mode 100644 index ebcd4868..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java +++ /dev/null @@ -1,7 +0,0 @@ -package com.usatiuk.dhfs.files.service; - -public enum GetattrType { - FILE, - DIRECTORY, - SYMLINK -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java deleted file mode 100644 index 0fa8ee29..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java +++ /dev/null @@ -1,391 +0,0 @@ -package com.usatiuk.dhfs.fuse; - -import com.google.protobuf.UnsafeByteOperations; -import com.sun.security.auth.module.UnixSystem; -import com.usatiuk.dhfs.files.service.DhfsFileService; -import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException; -import com.usatiuk.dhfs.files.service.GetattrRes; -import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; -import com.usatiuk.kleppmanntree.AlreadyExistsException; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import jnr.ffi.Pointer; -import org.apache.commons.lang3.SystemUtils; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import ru.serce.jnrfuse.ErrorCodes; -import ru.serce.jnrfuse.FuseFillDir; -import ru.serce.jnrfuse.FuseStubFS; -import ru.serce.jnrfuse.struct.FileStat; -import ru.serce.jnrfuse.struct.FuseFileInfo; -import ru.serce.jnrfuse.struct.Statvfs; -import ru.serce.jnrfuse.struct.Timespec; - -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Optional; - -import static jnr.posix.FileStat.*; - -@ApplicationScoped -public class DhfsFuse extends FuseStubFS { - private static final int blksize = 1048576; - private static final int iosize = 1048576; - @Inject - ObjectPersistentStore persistentStore; // FIXME? 
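- // DhfsFuse adapts jnr-fuse callbacks to DhfsFileService: handlers resolve
- // paths to object UUIDs via open() and return 0 on success or a negated
- // errno (-ENOENT, -EIO, ...) as libfuse expects. blksize/iosize above are
- // 1 MiB; statfs() reports blksize as the block size, and the mount options
- // in init() request matching max_read / max_write (Linux) or iosize
- // (macOS) so the kernel issues large I/O requests.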
- @ConfigProperty(name = "dhfs.fuse.root") - String root; - @ConfigProperty(name = "dhfs.fuse.enabled") - boolean enabled; - @ConfigProperty(name = "dhfs.fuse.debug") - Boolean debug; - @ConfigProperty(name = "dhfs.files.target_chunk_size") - int targetChunkSize; - @Inject - JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors; - @Inject - DhfsFileService fileService; - - void init(@Observes @Priority(100000) StartupEvent event) { - if (!enabled) return; - Paths.get(root).toFile().mkdirs(); - Log.info("Mounting with root " + root); - - var uid = new UnixSystem().getUid(); - var gid = new UnixSystem().getGid(); - - var opts = new ArrayList(); - - // Assuming macFuse - if (SystemUtils.IS_OS_MAC) { - opts.add("-o"); - opts.add("iosize=" + iosize); - } else if (SystemUtils.IS_OS_LINUX) { - // FIXME: There's something else missing: the writes still seem to be 32k max -// opts.add("-o"); -// opts.add("large_read"); - opts.add("-o"); - opts.add("big_writes"); - opts.add("-o"); - opts.add("max_read=" + iosize); - opts.add("-o"); - opts.add("max_write=" + iosize); - } - opts.add("-o"); - opts.add("auto_cache"); - opts.add("-o"); - opts.add("uid=" + uid); - opts.add("-o"); - opts.add("gid=" + gid); - - mount(Paths.get(root), false, debug, opts.toArray(String[]::new)); - } - - void shutdown(@Observes @Priority(1) ShutdownEvent event) { - if (!enabled) return; - Log.info("Unmounting"); - umount(); - Log.info("Unmounted"); - } - - @Override - public int statfs(String path, Statvfs stbuf) { - try { - stbuf.f_frsize.set(blksize); - stbuf.f_bsize.set(blksize); - stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system - stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs - stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs - stbuf.f_files.set(1000); //FIXME: - stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME: - stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME: - stbuf.f_namemax.set(2048); - return super.statfs(path, stbuf); - } catch (Exception e) { - Log.error("When statfs " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int getattr(String path, FileStat stat) { - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var uuid = fileOpt.get(); - Optional found = fileService.getattr(uuid); - if (found.isEmpty()) { - return -ErrorCodes.ENOENT(); - } - switch (found.get().type()) { - case FILE -> { - stat.st_mode.set(S_IFREG | found.get().mode()); - stat.st_nlink.set(1); - stat.st_size.set(fileService.size(uuid)); - } - case DIRECTORY -> { - stat.st_mode.set(S_IFDIR | found.get().mode()); - stat.st_nlink.set(2); - } - case SYMLINK -> { - stat.st_mode.set(S_IFLNK | 0777); - stat.st_nlink.set(1); - stat.st_size.set(fileService.size(uuid)); - } - } - - // FIXME: Race? 
- stat.st_ctim.tv_sec.set(found.get().ctime() / 1000); - stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000); - stat.st_mtim.tv_sec.set(found.get().mtime() / 1000); - stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000); - stat.st_atim.tv_sec.set(found.get().mtime() / 1000); - stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000); - stat.st_blksize.set(blksize); - } catch (Exception e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); - } catch (Throwable e) { - Log.error("When getattr " + path, e); - return -ErrorCodes.EIO(); - } - return 0; - } - - @Override - public int utimens(String path, Timespec[] timespec) { - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var file = fileOpt.get(); - var res = fileService.setTimes(file, - timespec[0].tv_sec.get() * 1000, - timespec[1].tv_sec.get() * 1000); - if (!res) return -ErrorCodes.EINVAL(); - else return 0; - } catch (Exception e) { - Log.error("When utimens " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int open(String path, FuseFileInfo fi) { - try { - if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT(); - return 0; - } catch (Exception e) { - Log.error("When open " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { - if (size < 0) return -ErrorCodes.EINVAL(); - if (offset < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var file = fileOpt.get(); - var read = fileService.read(fileOpt.get(), offset, (int) size); - if (read.isEmpty()) return 0; - UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); - return read.get().size(); - } catch (Exception e) { - Log.error("When reading " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) { - if (offset < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var buffer = UninitializedByteBuffer.allocateUninitialized((int) size); - - jnrPtrByteOutputAccessors.getUnsafe().copyMemory( - buf.address(), - jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer), - size - ); - - var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer)); - return written.intValue(); - } catch (Exception e) { - Log.error("When writing " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int create(String path, long mode, FuseFileInfo fi) { - try { - var ret = fileService.create(path, mode); - if (ret.isEmpty()) return -ErrorCodes.ENOSPC(); - else return 0; - } catch (Exception e) { - Log.error("When creating " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int mkdir(String path, long mode) { - try { - fileService.mkdir(path, mode); - return 0; - } catch (AlreadyExistsException aex) { - return -ErrorCodes.EEXIST(); - } catch (Exception e) { - Log.error("When creating dir " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int rmdir(String path) { - try { - fileService.unlink(path); - return 0; - } catch (DirectoryNotEmptyException ex) { - return -ErrorCodes.ENOTEMPTY(); - } catch (Exception e) { - Log.error("When removing dir " + path, e); - return 
-ErrorCodes.EIO(); - } - } - - @Override - public int rename(String path, String newName) { - try { - var ret = fileService.rename(path, newName); - if (!ret) return -ErrorCodes.ENOENT(); - else return 0; - } catch (Exception e) { - Log.error("When renaming " + path, e); - return -ErrorCodes.EIO(); - } - - } - - @Override - public int unlink(String path) { - try { - fileService.unlink(path); - return 0; - } catch (Exception e) { - Log.error("When unlinking " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int truncate(String path, long size) { - if (size < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var file = fileOpt.get(); - var ok = fileService.truncate(file, size); - if (ok) - return 0; - else - return -ErrorCodes.ENOSPC(); - } catch (Exception e) { - Log.error("When truncating " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int chmod(String path, long mode) { - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var ret = fileService.chmod(fileOpt.get(), mode); - if (ret) return 0; - else return -ErrorCodes.EINVAL(); - } catch (Exception e) { - Log.error("When chmod " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) { - try { - Iterable found; - try { - found = fileService.readDir(path); - } catch (StatusRuntimeException e) { - if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode())) - return -ErrorCodes.ENOENT(); - else throw e; - } - - filler.apply(buf, ".", null, 0); - filler.apply(buf, "..", null, 0); - - for (var c : found) { - filler.apply(buf, c, null, 0); - } - - return 0; - } catch (Exception e) { - Log.error("When readdir " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int readlink(String path, Pointer buf, long size) { - if (size < 0) return -ErrorCodes.EINVAL(); - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - var file = fileOpt.get(); - var read = fileService.readlinkBS(fileOpt.get()); - if (read.isEmpty()) return 0; - UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size)); - buf.putByte(Math.min(size - 1, read.size()), (byte) 0); - return 0; - } catch (Exception e) { - Log.error("When reading " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int chown(String path, long uid, long gid) { - try { - var fileOpt = fileService.open(path); - if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT(); - return 0; - } catch (Exception e) { - Log.error("When chown " + path, e); - return -ErrorCodes.EIO(); - } - } - - @Override - public int symlink(String oldpath, String newpath) { - try { - var ret = fileService.symlink(oldpath, newpath); - if (ret == null) return -ErrorCodes.EEXIST(); - else return 0; - } catch (Exception e) { - Log.error("When creating " + newpath, e); - return -ErrorCodes.EIO(); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java deleted file mode 100644 index d2790516..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java +++ /dev/null @@ -1,64 +0,0 @@ -package com.usatiuk.dhfs.fuse; - -import com.google.protobuf.ByteOutput; -import 
jnr.ffi.Pointer; - -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; - -public class JnrPtrByteOutput extends ByteOutput { - private final Pointer _backing; - private final long _size; - private final JnrPtrByteOutputAccessors _accessors; - private long _pos; - - public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) { - _backing = backing; - _size = size; - _pos = 0; - _accessors = accessors; - } - - @Override - public void write(byte value) { - throw new UnsupportedOperationException(); - } - - @Override - public void write(byte[] value, int offset, int length) { - if (length + _pos > _size) throw new IndexOutOfBoundsException(); - _backing.put(_pos, value, offset, length); - _pos += length; - } - - @Override - public void writeLazy(byte[] value, int offset, int length) { - if (length + _pos > _size) throw new IndexOutOfBoundsException(); - _backing.put(_pos, value, offset, length); - _pos += length; - } - - @Override - public void write(ByteBuffer value) { - var rem = value.remaining(); - if (rem + _pos > _size) throw new IndexOutOfBoundsException(); - - if (value.isDirect()) { - if (value instanceof MappedByteBuffer mb) { - mb.load(); - } - long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position(); - var out = _backing.address() + _pos; - _accessors.getUnsafe().copyMemory(addr, out, rem); - } else { - throw new UnsupportedOperationException(); - } - - _pos += rem; - } - - @Override - public void writeLazy(ByteBuffer value) { - write(value); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java deleted file mode 100644 index 78cc8ff4..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.usatiuk.dhfs.fuse; - -import jakarta.inject.Singleton; -import jdk.internal.access.JavaNioAccess; -import jdk.internal.access.SharedSecrets; -import lombok.Getter; -import sun.misc.Unsafe; - -import java.lang.reflect.Field; - -@Singleton -class JnrPtrByteOutputAccessors { - @Getter - JavaNioAccess _nioAccess; - @Getter - Unsafe _unsafe; - - JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException { - _nioAccess = SharedSecrets.getJavaNioAccess(); - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - _unsafe = (Unsafe) f.get(null); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java deleted file mode 100644 index 2743bf48..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java +++ /dev/null @@ -1,566 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree; - -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObject; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.dhfs.objects.repository.opsupport.OpSender; -import com.usatiuk.kleppmanntree.*; -import com.usatiuk.dhfs.utils.VoidFn; 
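- // JKleppmannTreeManager lazily creates one JKleppmannTree per name, each
- // backed by a JKleppmannTreePersistentData object holding the op log,
- // per-peer op queues, the peer timestamp log and the logical clock.
- // Illustrative usage sketch (the `treeManager` instance and `parentId`
- // are assumed names, not from this file):
- //
- //   var tree = treeManager.getTree("fs");
- //   var nodeId = tree.getNewNodeId();
- //   tree.move(parentId, new JKleppmannTreeNodeMetaDirectory("docs"), nodeId);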
-import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; - -@ApplicationScoped -public class JKleppmannTreeManager { - private static final String dataFileName = "trees"; - private final ConcurrentHashMap _trees = new ConcurrentHashMap<>(); - @Inject - JKleppmannTreePeerInterface jKleppmannTreePeerInterface; - @Inject - OpSender opSender; - @Inject - OpObjectRegistry opObjectRegistry; - @Inject - JObjectManager jObjectManager; - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - JObjectTxManager jObjectTxManager; - @Inject - SoftJObjectFactory softJObjectFactory; - @Inject - JKleppmannTreePeerInterface peerInterface; - - public JKleppmannTree getTree(String name) { - return _trees.computeIfAbsent(name, this::createTree); - } - - private JKleppmannTree createTree(String name) { - return jObjectTxManager.executeTx(() -> { - var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null); - if (data == null) { - data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty()); - } - var tree = new JKleppmannTree(name); - opObjectRegistry.registerObject(tree); - return tree; - }); - } - - public class JKleppmannTree implements OpObject { - private final KleppmannTree _tree; - - private final SoftJObject _persistentData; - - private final JKleppmannTreeStorageInterface _storageInterface; - private final JKleppmannTreeClock _clock; - - private final String _treeName; - - JKleppmannTree(String treeName) { - _treeName = treeName; - - _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName)); - - _storageInterface = new JKleppmannTreeStorageInterface(); - _clock = new JKleppmannTreeClock(); - - _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder()); - } - - public String traverse(List names) { - return _tree.traverse(names); - } - - public String getNewNodeId() { - return _storageInterface.getNewNodeId(); - } - - public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) { - _tree.move(newParent, newMeta, node); - } - - public void trash(JKleppmannTreeNodeMeta newMeta, String node) { - _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node); - } - - @Override - public boolean hasPendingOpsForHost(UUID host) { - return _persistentData.get() - .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getQueues().containsKey(host) && - !d.getQueues().get(host).isEmpty() - ); - } - - @Override - public List getPendingOpsForHost(UUID host, int limit) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d.getQueues().containsKey(host)) { - var queue = d.getQueues().get(host); - ArrayList collected = new ArrayList<>(); - - for (var node : queue.entrySet()) { - collected.add(new JKleppmannTreeOpWrapper(node.getValue())); - if (collected.size() >= limit) break; - } - - return collected; - } - return List.of(); - }); - } - - @Override - public String getId() { - return _treeName; - } - - @Override - public void commitOpForHost(UUID host, Op op) { - if (!(op instanceof JKleppmannTreeOpWrapper jop)) - throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + 
getId()); - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - - var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue(); - if (!Objects.equals(jop.getOp(), got)) - throw new IllegalArgumentException("Committed op push was not the oldest"); - - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getQueues().get(host).pollFirstEntry(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp()); - } - }); - - } - - @Override - public void pushBootstrap(UUID host) { - _tree.recordBoostrapFor(host); - } - - public Pair findParent(Function predicate) { - return _tree.findParent(predicate); - } - - @Override - public boolean acceptExternalOp(UUID from, Op op) { - if (op instanceof JKleppmannTreePeriodicPushOp pushOp) { - return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp()); - } - - if (!(op instanceof JKleppmannTreeOpWrapper jop)) - throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId()); - - JObject fileRef; - if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { - var fino = f.getFileIno(); - fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId())); - } else { - fileRef = null; - } - - if (Log.isTraceEnabled()) - Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName()); - - try { - _tree.applyExternalOp(from, jop.getOp()); - } catch (Exception e) { - Log.error("Error applying external op", e); - throw e; - } finally { - // FIXME: - // Fixup the ref if it didn't really get applied - - if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile)) - Log.error("Could not create child of pushed op: " + jop.getOp()); - - if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) { - if (fileRef != null) { - var got = jObjectManager.get(jop.getOp().childId()).orElse(null); - - VoidFn remove = () -> { - fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - m.removeRef(jop.getOp().childId()); - }); - }; - - if (got == null) { - remove.apply(); - } else { - try { - got.rLock(); - try { - got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno())) - remove.apply(); - } finally { - got.rUnlock(); - } - } catch (DeletedObjectAccessException dex) { - remove.apply(); - } - } - } - } - } - return true; - } - - @Override - public Op getPeriodicPushOp() { - return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp()); - } - - @Override - public void addToTx() { - // FIXME: a hack - _persistentData.get().rwLockNoCopy(); - _persistentData.get().rwUnlock(); - } - - private class JOpRecorder implements OpRecorder { - @Override - public void recordOp(OpMove op) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - var hostUuds = persistentPeerDataService.getHostUuids().stream().toList(); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData 
object) { - object.recordOp(hostUuds, op); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.removeOp(hostUuds, op); - } - }); - opSender.push(JKleppmannTree.this); - } - - @Override - public void recordOpForPeer(UUID peer, OpMove op) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.recordOp(peer, op); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.removeOp(peer, op); - } - }); - opSender.push(JKleppmannTree.this); - } - } - - private class JKleppmannTreeClock implements Clock { - @Override - public Long getTimestamp() { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1; - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getClock().getTimestamp(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getClock().ungetTimestamp(); - } - }); - return ret; - } - - @Override - public Long peekTimestamp() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp()); - } - - @Override - public Long updateTimestamp(Long receivedTimestamp) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - Long _old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - _old = object.getClock().updateTimestamp(receivedTimestamp); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getClock().setTimestamp(_old); - } - }); - return _persistentData.get().getData().getClock().peekTimestamp(); - } - } - - public class JKleppmannTreeStorageInterface implements StorageInterface { - private final LogWrapper _logWrapper = new LogWrapper(); - private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper(); - - public JKleppmannTreeStorageInterface() { - if (jObjectManager.get(getRootId()).isEmpty()) { - putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory("")))); - putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null))); - } - } - - public JObject putNode(JKleppmannTreeNode node) { - return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent())); - } - - public JObject putNodeLocked(JKleppmannTreeNode node) { - return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent())); - } - - @Override - public String getRootId() { - return _treeName + "_jt_root"; - } - - @Override - public String getTrashId() { - return _treeName + "_jt_trash"; - } - - @Override - public String getNewNodeId() { - return persistentPeerDataService.getUniqueId(); - } - - @Override - public JKleppmannTreeNodeWrapper getById(String id) { - var got = jObjectManager.get(id); - if (got.isEmpty()) return null; - return new JKleppmannTreeNodeWrapper((JObject) got.get()); - } - - @Override - public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) { - return new 
JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node))); - } - - @Override - public void removeNode(String id) {} - - @Override - public LogInterface getLog() { - return _logWrapper; - } - - @Override - public PeerTimestampLogInterface getPeerTimestampLog() { - return _peerLogWrapper; - } - - @Override - public void rLock() { - _persistentData.get().rLock(); - } - - @Override - public void rUnlock() { - _persistentData.get().rUnlock(); - } - - @Override - public void rwLock() { - _persistentData.get().rwLockNoCopy(); - } - - @Override - public void rwUnlock() { - _persistentData.get().rwUnlock(); - } - - @Override - public void assertRwLock() { - _persistentData.get().assertRwLock(); - } - - private class PeerLogWrapper implements PeerTimestampLogInterface { - - @Override - public Long getForPeer(UUID peerId) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeerTimestampLog().get(peerId)); - } - - @Override - public void putForPeer(UUID peerId, Long timestamp) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - Long old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - old = object.getPeerTimestampLog().put(peerId, timestamp); - return !Objects.equals(old, timestamp); - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - if (old != null) - object.getPeerTimestampLog().put(peerId, old); - else - object.getPeerTimestampLog().remove(peerId, timestamp); - } - }); - } - } - - private class LogWrapper implements LogInterface { - @Override - public Pair, LogRecord> peekOldest() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var ret = d.getLog().firstEntry(); - if (ret == null) return null; - return Pair.of(ret); - }); - } - - @Override - public Pair, LogRecord> takeOldest() { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - - var ret = _persistentData.get().getData().getLog().firstEntry(); - if (ret != null) - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getLog().pollFirstEntry(); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getLog().put(ret.getKey(), ret.getValue()); - } - }); - return Pair.of(ret); - } - - @Override - public Pair, LogRecord> peekNewest() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var ret = d.getLog().lastEntry(); - if (ret == null) return null; - return Pair.of(ret); - }); - } - - @Override - public List, LogRecord>> newestSlice(CombinedTimestamp since, boolean inclusive) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - var tail = d.getLog().tailMap(since, inclusive); - return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - }); - } - - @Override - public List, LogRecord>> getAll() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList(); - }); - } - - @Override - public boolean isEmpty() { - return 
_persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().isEmpty(); - }); - } - - @Override - public boolean containsKey(CombinedTimestamp timestamp) { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return d.getLog().containsKey(timestamp); - }); - } - - @Override - public long size() { - return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - return (long) d.getLog().size(); - }); - } - - @Override - public void put(CombinedTimestamp timestamp, LogRecord record) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (_persistentData.get().getData().getLog().containsKey(timestamp)) - throw new IllegalStateException("Overwriting log entry?"); - _persistentData.get().mutate(new JMutator() { - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - object.getLog().put(timestamp, record); - return true; - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - object.getLog().remove(timestamp, record); - } - }); - } - - @Override - public void replace(CombinedTimestamp timestamp, LogRecord record) { - _persistentData.get().assertRwLock(); - _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - _persistentData.get().mutate(new JMutator() { - LogRecord old; - - @Override - public boolean mutate(JKleppmannTreePersistentData object) { - old = object.getLog().put(timestamp, record); - return !Objects.equals(old, record); - } - - @Override - public void revert(JKleppmannTreePersistentData object) { - if (old != null) - object.getLog().put(timestamp, old); - else - object.getLog().remove(timestamp, record); - } - }); - } - } - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java deleted file mode 100644 index cd4b09c9..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree; - -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.kleppmanntree.TreeNode; -import com.usatiuk.kleppmanntree.TreeNodeWrapper; - -import java.util.UUID; - -public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper { - private final JObject _backing; - - public JKleppmannTreeNodeWrapper(JObject backing) {_backing = backing;} - - @Override - public void rLock() { - _backing.rLock(); - } - - @Override - public void rUnlock() { - _backing.rUnlock(); - } - - @Override - public void rwLock() { - _backing.rwLock(); - } - - @Override - public void rwUnlock() { - _backing.bumpVer(); // FIXME:? 
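- // The unconditional bumpVer() above marks the node as changed before the
- // write lock is released, presumably so that any mutation made under the
- // lock is recorded as a new version even though the wrapper cannot tell
- // whether the caller actually modified anything (hence the FIXME).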
- _backing.rwUnlock(); - } - - @Override - public void freeze() { - _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { - m.freeze(); - return null; - }); - } - - @Override - public void unfreeze() { - _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { - m.unfreeze(); - return null; - }); - } - - @Override - public void notifyRef(String id) { - _backing.getMeta().addRef(id); - } - - @Override - public void notifyRmRef(String id) { - _backing.getMeta().removeRef(id); - } - - @Override - public TreeNode getNode() { - _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (_backing.getData() == null) - throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!"); - return _backing.getData().getNode(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java deleted file mode 100644 index 4612f8fc..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree; - -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import com.usatiuk.kleppmanntree.OpMove; -import lombok.Getter; - -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -// Wrapper to avoid having to specify generic types -public class JKleppmannTreeOpWrapper implements Op { - @Getter - private final OpMove _op; - - public JKleppmannTreeOpWrapper(OpMove op) { - if (op == null) throw new IllegalArgumentException("op shouldn't be null"); - _op = op; - } - - @Override - public Collection getEscapedRefs() { - if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) { - return List.of(mf.getFileIno()); - } - return List.of(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java deleted file mode 100644 index 39b5d484..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree; - -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.kleppmanntree.PeerInterface; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; - -import java.util.Collection; -import java.util.UUID; - -@Singleton -public class JKleppmannTreePeerInterface implements PeerInterface { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Override - public UUID getSelfId() { - return persistentPeerDataService.getSelfUuid(); - } - - @Override - public Collection getAllPeers() { - return persistentPeerDataService.getHostUuidsAndSelf(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java deleted file mode 100644 index 3c84d067..00000000 --- 
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree; - -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import lombok.Getter; - -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -public class JKleppmannTreePeriodicPushOp implements Op { - @Getter - private final UUID _from; - @Getter - private final long _timestamp; - - public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) { - _from = from; - _timestamp = timestamp; - } - - @Override - public Collection getEscapedRefs() { - return List.of(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java deleted file mode 100644 index 7c9f13da..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.usatiuk.dhfs.objects.jkleppmanntree.serializers; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper; -import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP; -import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP; -import com.usatiuk.kleppmanntree.LogEffect; -import com.usatiuk.kleppmanntree.LogEffectOld; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; - -import java.util.UUID; - -@Singleton -public class JKleppmannTreeLogEffectSerializer implements ProtoSerializer> { - @Inject - ProtoSerializer opProtoSerializer; - @Inject - ProtoSerializer metaProtoSerializer; - - @Override - public LogEffect deserialize(JKleppmannTreeOpLogEffectP message) { - return new LogEffect<>( - message.hasOldParent() ? 
-                message.hasOldParent() ? new LogEffectOld<>(
-                        opProtoSerializer.deserialize(message.getOldEffectiveMove()).getOp(),
-                        message.getOldParent(),
-                        metaProtoSerializer.deserialize(message.getOldMeta())
-                ) : null,
-                opProtoSerializer.deserialize(message.getEffectiveOp()).getOp(),
-                message.getNewParentId(),
-                metaProtoSerializer.deserialize(message.getNewMeta()),
-                message.getSelfId()
-        );
-    }
-
-    @Override
-    public JKleppmannTreeOpLogEffectP serialize(LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String> object) {
-        var builder = JKleppmannTreeOpLogEffectP.newBuilder();
-        // FIXME: all these wrappers
-        if (object.oldInfo() != null) {
-            builder.setOldEffectiveMove(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.oldInfo().oldEffectiveMove())));
-            builder.setOldParent(object.oldInfo().oldParent());
-            builder.setOldMeta(metaProtoSerializer.serialize(object.oldInfo().oldMeta()));
-        }
-        builder.setEffectiveOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.effectiveOp())));
-        builder.setNewParentId(object.newParentId());
-        builder.setNewMeta(metaProtoSerializer.serialize(object.newMeta()));
-        builder.setSelfId(object.childId());
-        return builder.build();
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java
deleted file mode 100644
index 8e0e36f6..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java
+++ /dev/null
@@ -1,56 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.kleppmanntree.TreeNode;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreeNodeProtoSerializer implements ProtoSerializer<JKleppmannTreeNodeP, JKleppmannTreeNode> {
-    @Inject
-    ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
-    @Inject
-    ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
-
-    @Override
-    public JKleppmannTreeNode deserialize(JKleppmannTreeNodeP message) {
-        var children = new HashMap<String, String>();
-        message.getChildrenList().forEach(child -> children.put(child.getKey(), child.getValue()));
-        var node = new TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String>(
-                message.getId(),
-                message.hasParent() ? message.getParent() : null,
-                message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
-                children
-        );
-        if (message.hasLastEffectiveOp())
-            node.setLastEffectiveOp((opProtoSerializer.deserialize(message.getLastEffectiveOp())).getOp());
-        return new JKleppmannTreeNode(node);
-    }
-
-    @Override
-    public JKleppmannTreeNodeP serialize(JKleppmannTreeNode object) {
-        var builder = JKleppmannTreeNodeP.newBuilder().setId(object.getNode().getId());
-        if (object.getNode().getParent() != null)
-            builder.setParent(object.getNode().getParent());
-        if (object.getNode().getMeta() != null) {
-            builder.setMeta(metaProtoSerializer.serialize(object.getNode().getMeta()));
-        }
-        if (object.getNode().getLastEffectiveOp() != null)
-            builder.setLastEffectiveOp(
-                    opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.getNode().getLastEffectiveOp()))
-            );
-        object.getNode().getChildren().forEach((k, v) -> {
-            builder.addChildrenBuilder().setKey(k).setValue(v);
-        });
-        return builder.build();
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java
deleted file mode 100644
index 4e7c8c43..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.kleppmanntree.CombinedTimestamp;
-import com.usatiuk.kleppmanntree.OpMove;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreeOpProtoSerializer implements ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> {
-    @Inject
-    ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
-
-    @Override
-    public JKleppmannTreeOpWrapper deserialize(JKleppmannTreeOpP message) {
-        return new JKleppmannTreeOpWrapper(new OpMove<>(
-                new CombinedTimestamp<>(message.getTimestamp(), UUID.fromString(message.getPeer())), message.getNewParentId(),
-                message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
-                message.getChild()
-        ));
-    }
-
-    @Override
-    public JKleppmannTreeOpP serialize(JKleppmannTreeOpWrapper object) {
-        var builder = JKleppmannTreeOpP.newBuilder();
-        builder.setTimestamp(object.getOp().timestamp().timestamp())
-                .setPeer(object.getOp().timestamp().nodeId().toString())
-                .setNewParentId(object.getOp().newParentId())
-                .setChild(object.getOp().childId());
-        if (object.getOp().newMeta() != null)
-            builder.setMeta(metaProtoSerializer.serialize(object.getOp().newMeta()));
-        return builder.build();
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java
deleted file mode 100644
index 24bd6a66..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreePeriodicPushOp;
-import com.usatiuk.dhfs.objects.repository.JKleppmannTreePeriodicPushOpP;
-import jakarta.inject.Singleton;
-
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreePeriodicPushOpProtoSerializer implements ProtoSerializer<JKleppmannTreePeriodicPushOpP, JKleppmannTreePeriodicPushOp> {
-
-    @Override
-    public JKleppmannTreePeriodicPushOp deserialize(JKleppmannTreePeriodicPushOpP message) {
-        return new JKleppmannTreePeriodicPushOp(UUID.fromString(message.getFromUuid()), message.getTimestamp());
-    }
-
-    @Override
-    public JKleppmannTreePeriodicPushOpP serialize(JKleppmannTreePeriodicPushOp object) {
-        return JKleppmannTreePeriodicPushOpP.newBuilder().setTimestamp(object.getTimestamp()).setFromUuid(object.getFrom().toString()).build();
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java
deleted file mode 100644
index 75cdab5b..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java
+++ /dev/null
@@ -1,86 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreePersistentDataP;
-import com.usatiuk.kleppmanntree.*;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.TreeMap;
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreePersistentDataProtoSerializer implements ProtoSerializer<JKleppmannTreePersistentDataP, JKleppmannTreePersistentData> {
-    @Inject
-    ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
-    @Inject
-    ProtoSerializer<JKleppmannTreeOpLogEffectP, LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String>> effectProtoSerializer;
-
-    @Override
-    public JKleppmannTreePersistentData deserialize(JKleppmannTreePersistentDataP message) {
-        HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues = new HashMap<>();
-
-        for (var q : message.getQueuesList()) {
-            var qmap = new TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>();
-            for (var o : q.getEntriesList()) {
-                var op = (JKleppmannTreeOpWrapper) opProtoSerializer.deserialize(o.getOp());
-                qmap.put(new CombinedTimestamp<>(o.getClock(), UUID.fromString(o.getUuid())), op.getOp());
-            }
-            queues.put(UUID.fromString(q.getNode()), qmap);
-        }
-
-        var log = new HashMap<UUID, Long>();
-
-        for (var l : message.getPeerLogList()) {
-            log.put(UUID.fromString(l.getHost()), l.getTimestamp());
-        }
-
-        var opLog = new TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>();
-        for (var l : message.getOpLogList()) {
-            opLog.put(new CombinedTimestamp<>(l.getClock(), UUID.fromString(l.getUuid())),
-                    new LogRecord<>(opProtoSerializer.deserialize(l.getOp()).getOp(), l.getEffectsList().stream().map(effectProtoSerializer::deserialize).toList())
-            );
-        }
-
-        return new JKleppmannTreePersistentData(
-                message.getTreeName(),
-                new AtomicClock(message.getClock()),
-                queues,
-                log,
-                opLog
-        );
-    }
-
-    @Override
-    public JKleppmannTreePersistentDataP serialize(JKleppmannTreePersistentData object) {
-        var builder = JKleppmannTreePersistentDataP.newBuilder()
-                .setTreeName(object.getTreeName())
-                .setClock(object.getClock().peekTimestamp());
-        for (var q : object.getQueues().entrySet()) {
-            if (q.getValue().isEmpty()) continue;
-            var qb = builder.addQueuesBuilder();
-            qb.setNode(q.getKey().toString());
-            for (var e : q.getValue().entrySet()) {
-                qb.addEntriesBuilder().setClock(e.getKey().timestamp()).setUuid(e.getKey().nodeId().toString())
-                        .setOp((JKleppmannTreeOpP) opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue())));
-            }
-        }
-        for (var peerLogEntry : object.getPeerTimestampLog().entrySet()) {
-            builder.addPeerLogBuilder().setHost(peerLogEntry.getKey().toString()).setTimestamp(peerLogEntry.getValue());
-        }
-        for (var e : object.getLog().entrySet()) {
-            builder.addOpLogBuilder()
-                    .setClock(e.getKey().timestamp())
-                    .setUuid(e.getKey().nodeId().toString())
-                    .setOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue().op())))
-                    .addAllEffects(e.getValue().effects().stream().map(effectProtoSerializer::serialize).toList());
-        }
-        return builder.build();
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
deleted file mode 100644
index 0146da88..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.kleppmanntree.TreeNode;
-import lombok.Getter;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-// FIXME: Ideally this is two classes?
-@OnlyLocal
-public class JKleppmannTreeNode extends JObjectData {
-    @Getter
-    final TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> _node;
-
-    public JKleppmannTreeNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
-        _node = node;
-    }
-
-    @Override
-    public String getName() {
-        return _node.getId();
-    }
-
-    @Override
-    public Class<? extends ConflictResolver> getConflictResolver() {
-        return null;
-    }
-
-    @Override
-    public Collection<String> extractRefs() {
-        if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile)
-            return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno());
-        return Collections.unmodifiableCollection(_node.getChildren().values());
-    }
-
-    @Override
-    public Class<? extends JObjectData> getRefType() {
-        return JObjectData.class;
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
deleted file mode 100644
index 2ea7d27f..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.kleppmanntree.NodeMeta;
-import lombok.Getter;
-
-import java.util.Objects;
-
-@ProtoMirror(JKleppmannTreeNodeMetaP.class)
-public abstract class JKleppmannTreeNodeMeta implements NodeMeta {
-    @Getter
-    private final String _name;
-
-    public JKleppmannTreeNodeMeta(String name) {_name = name;}
-
-    public abstract JKleppmannTreeNodeMeta withName(String name);
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-        JKleppmannTreeNodeMeta that = (JKleppmannTreeNodeMeta) o;
-        return Objects.equals(_name, that._name);
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(_name);
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
deleted file mode 100644
index 79882017..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP;
-
-@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class)
-public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta {
-    public JKleppmannTreeNodeMetaDirectory(String name) {
-        super(name);
-    }
-
-    @Override
-    public JKleppmannTreeNodeMeta withName(String name) {
-        return new JKleppmannTreeNodeMetaDirectory(name);
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java
deleted file mode 100644
index 124cd51d..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java
+++ /dev/null
@@ -1,37 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP;
-import lombok.Getter;
-
-import java.util.Objects;
-
-@ProtoMirror(JKleppmannTreeNodeMetaFileP.class)
-public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta {
-    @Getter
-    private final String _fileIno;
-
-    public JKleppmannTreeNodeMetaFile(String name, String fileIno) {
-        super(name);
-        _fileIno = fileIno;
-    }
-
-    @Override
-    public JKleppmannTreeNodeMeta withName(String name) {
-        return new JKleppmannTreeNodeMetaFile(name, _fileIno);
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-        if (!super.equals(o)) return false;
-        JKleppmannTreeNodeMetaFile that = (JKleppmannTreeNodeMetaFile) o;
-        return Objects.equals(_fileIno, that._fileIno);
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(super.hashCode(), _fileIno);
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
deleted file mode 100644
index d6881d5b..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.kleppmanntree.AtomicClock;
-import com.usatiuk.kleppmanntree.CombinedTimestamp;
-import com.usatiuk.kleppmanntree.LogRecord;
-import com.usatiuk.kleppmanntree.OpMove;
-import lombok.Getter;
-
-import java.util.*;
-
-@OnlyLocal
-public class JKleppmannTreePersistentData extends JObjectData {
-    private final String _treeName;
-    @Getter
-    private final AtomicClock _clock;
-    @Getter
-    private final HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> _queues;
-    @Getter
-    private final HashMap<UUID, Long> _peerTimestampLog;
-    @Getter
-    private final TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> _log;
-
-    public JKleppmannTreePersistentData(String treeName, AtomicClock clock,
-                                        HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues,
-                                        HashMap<UUID, Long> peerTimestampLog, TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> log) {
-        _treeName = treeName;
-        _clock = clock;
-        _queues = queues;
-        _peerTimestampLog = peerTimestampLog;
-        _log = log;
-    }
-
-    public JKleppmannTreePersistentData(String treeName) {
-        _treeName = treeName;
-        _clock = new AtomicClock(1);
-        _queues = new HashMap<>();
-        _peerTimestampLog = new HashMap<>();
-        _log = new TreeMap<>();
-    }
-
-    public static String nameFromTreeName(String treeName) {
-        return treeName + "_pd";
-    }
-
-    public void recordOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
-        _queues.computeIfAbsent(host, h -> new TreeMap<>());
-        _queues.get(host).put(opMove.timestamp(), opMove);
-    }
-
-    public void removeOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
-        _queues.get(host).remove(opMove.timestamp(), opMove);
-    }
-
-    public void recordOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
-        for (var u : hosts) {
-            recordOp(u, opMove);
-        }
-    }
-
-    public void removeOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
-        for (var u : hosts) {
-            removeOp(u, opMove);
-        }
-    }
-
-
-    @Override
-    public String getName() {
-        return nameFromTreeName(_treeName);
-    }
-
-    public String getTreeName() {
-        return _treeName;
-    }
-
-    @Override
-    public Class<? extends ConflictResolver> getConflictResolver() {
-        return null;
-    }
-
-    @Override
-    public Collection<String> extractRefs() {
-        return List.of();
-    }
-}
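
JKleppmannTreePersistentData above keeps one TreeMap of pending ops per known peer, so the head of each queue is always the oldest move not yet acknowledged by that peer. A self-contained sketch of the same bookkeeping pattern; Stamp and Move here are assumed stand-ins, not the real kleppmanntree types:

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;

record Stamp(long time, UUID peer) implements Comparable<Stamp> {
    public int compareTo(Stamp o) {
        int c = Long.compare(time, o.time);
        return c != 0 ? c : peer.compareTo(o.peer);
    }
}

record Move(Stamp timestamp, String newParentId, String childId) {}

public class OpQueueDemo {
    // One ordered queue of pending ops per destination peer
    private final Map<UUID, TreeMap<Stamp, Move>> queues = new HashMap<>();

    void recordOp(UUID host, Move op) {
        // computeIfAbsent keeps a sorted per-peer queue, created lazily
        queues.computeIfAbsent(host, h -> new TreeMap<>()).put(op.timestamp(), op);
    }

    void removeOp(UUID host, Move op) {
        // Two-argument remove only drops the entry if it still maps to this op
        // (assumes recordOp ran for this host first, like the original)
        queues.get(host).remove(op.timestamp(), op);
    }
}
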
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java
deleted file mode 100644
index 47e026c8..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface AssumedUnique {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java
deleted file mode 100644
index 5557adc2..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public class DeletedObjectAccessException extends RuntimeException {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java
deleted file mode 100644
index 4f0a1be7..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public interface JMutator<T extends JObjectData> {
-    boolean mutate(T object);
-
-    void revert(T object);
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java
deleted file mode 100644
index 1d0a9ca0..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.utils.VoidFn;
-
-public abstract class JObject<T extends JObjectData> {
-    public abstract ObjectMetadata getMeta();
-
-    public abstract T getData();
-
-    abstract void rollback(ObjectMetadata meta, JObjectData data);
-
-    public abstract <R> R runReadLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnRead<T, R> fn);
-
-    // Note: this is expensive
-    public abstract <R> R runWriteLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWrite<T, R> fn);
-
-    public void runReadLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnReadVoid<T> fn) {
-        runReadLocked(resolutionStrategy, (m, d) -> {
-            fn.apply(m, d);
-            return null;
-        });
-    }
-
-    public void runWriteLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWriteVoid<T> fn) {
-        runWriteLocked(resolutionStrategy, (m, d, b, v) -> {
-            fn.apply(m, d, b, v);
-            return null;
-        });
-    }
-
-    public <D extends JObjectData> JObject<D> as(Class<D> klass) {
-        if (klass.isAssignableFrom(getMeta().getKnownClass())) return (JObject<D>) this;
-        throw new IllegalStateException("Class mismatch for " + getMeta().getName() + " got: " + getMeta().getKnownClass());
-    }
-
-    public JObject<T> local() {
-        tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-        if (getData() == null)
-            throw new IllegalStateException("Data missing for " + getMeta().getName());
-        return this;
-    }
-
-    public JObject<T> remote() {
-        tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
-        if (getData() == null)
-            throw new IllegalStateException("Data missing for " + getMeta().getName());
-        return this;
-    }
-
-    public abstract void mutate(JMutator<T> mutator);
-
-    public abstract boolean tryResolve(JObjectManager.ResolutionStrategy resolutionStrategy);
-
-    public abstract void externalResolution(JObjectData data);
-
-    public abstract void rwLock();
-
-    public abstract boolean tryRwLock();
-
-    public abstract void rwLockNoCopy();
-
-    public abstract void rwUnlock();
-
-    public abstract void drop();
-
-    abstract boolean haveRwLock();
-
-    public abstract void assertRwLock();
-
-    public abstract void doDelete();
-
-    public abstract void markSeen();
-
-    public abstract void rLock();
-
-    public abstract void rUnlock();
-
-    public abstract void bumpVer();
-
-    public abstract void commitFence();
-
-    public abstract void commitFenceAsync(VoidFn callback);
-
-    public abstract int estimateSize();
-
-    abstract boolean updateDeletionState();
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java
deleted file mode 100644
index 9afa248e..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-
-import java.util.Collection;
-import java.util.List;
-
-@ProtoMirror(JObjectDataP.class)
-public abstract class JObjectData {
-    public abstract String getName();
-
-    public Class<? extends ConflictResolver> getConflictResolver() {
-        throw new UnsupportedOperationException();
-    }
-
-    public Class<? extends JObjectData> getRefType() {
-        throw new UnsupportedOperationException("This object shouldn't have refs");
-    }
-
-    public Collection<String> extractRefs() {
-        return List.of();
-    }
-
-    public int estimateSize() {
-        return 0;
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java
deleted file mode 100644
index 41558e57..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public record JObjectKey(short type) {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java
deleted file mode 100644
index 4194a807..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java
+++ /dev/null
@@ -1,95 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Shutdown;
-import io.quarkus.runtime.Startup;
-import jakarta.enterprise.context.ApplicationScoped;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.util.LinkedHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-@ApplicationScoped
-public class JObjectLRU {
-    private final LinkedHashMap<JObject<?>, Long> _cache = new LinkedHashMap<>();
"dhfs.objects.lru.limit") - long sizeLimit; - @ConfigProperty(name = "dhfs.objects.lru.print-stats") - boolean printStats; - private long _curSize = 0; - private long _evict = 0; - private ExecutorService _statusExecutor = null; - - @Startup - void init() { - if (printStats) { - _statusExecutor = Executors.newSingleThreadExecutor(); - _statusExecutor.submit(() -> { - try { - while (true) { - Thread.sleep(10000); - if (_curSize > 0) - Log.info("Cache status: size=" - + _curSize / 1024 / 1024 + "MB" - + " evicted=" + _evict); - _evict = 0; - if (Log.isTraceEnabled()) { - long realSize = 0; - synchronized (_cache) { - for (JObject object : _cache.keySet()) { - realSize += object.estimateSize(); - } - Log.info("Cache status: real size=" - + realSize / 1024 / 1024 + "MB" + " entries=" + _cache.size()); - } - } - } - } catch (InterruptedException ignored) { - } - }); - } - } - - @Shutdown - void shutdown() { - if (_statusExecutor != null) - _statusExecutor.shutdownNow(); - } - - public void notifyAccess(JObject obj) { - if (obj.getData() == null) return; - long size = obj.estimateSize(); - synchronized (_cache) { - _curSize += size; - var old = _cache.putLast(obj, size); - if (old != null) - _curSize -= old; - - while (_curSize >= sizeLimit) { - var del = _cache.pollFirstEntry(); - _curSize -= del.getValue(); - _evict++; - } - } - } - - public void updateSize(JObject obj) { - long size = obj.estimateSize(); - synchronized (_cache) { - var old = _cache.replace(obj, size); - if (old != null) { - _curSize += size; - _curSize -= old; - } else { - return; - } - - while (_curSize >= sizeLimit) { - var del = _cache.pollFirstEntry(); - _curSize -= del.getValue(); - _evict++; - } - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java deleted file mode 100644 index 377c9533..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java +++ /dev/null @@ -1,63 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.dhfs.utils.VoidFn; -import jakarta.annotation.Nullable; - -import java.util.Collection; -import java.util.Optional; - -public interface JObjectManager { - // FIXME: - void runWriteListeners(JObject obj, boolean metaChanged, boolean dataChanged); - - void registerWriteListener(Class klass, WriteListenerFn fn); - - void registerMetaWriteListener(Class klass, WriteListenerFn fn); - - Optional> get(String name); - - Collection findAll(); - - // Put a new object - JObject put(T object, Optional parent); - - JObject putLocked(T object, Optional parent); - - // Get an object with a name if it exists, otherwise create new one based on metadata - // Should be used when working with objects referenced from the outside - JObject getOrPut(String name, Class klass, Optional parent); - - JObject getOrPutLocked(String name, Class klass, Optional parent); - - enum ResolutionStrategy { - NO_RESOLUTION, - LOCAL_ONLY, - REMOTE - } - - @FunctionalInterface - interface WriteListenerFn { - void apply(JObject obj); - } - - @FunctionalInterface - interface ObjectFnRead { - R apply(ObjectMetadata meta, @Nullable T data); - } - - @FunctionalInterface - interface ObjectFnWrite { - R apply(ObjectMetadata indexData, @Nullable T data, VoidFn bump, VoidFn invalidate); - } - - @FunctionalInterface - interface ObjectFnReadVoid { - void apply(ObjectMetadata meta, @Nullable T data); - } - - 
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java
deleted file mode 100644
index 5cd3e2ce..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java
+++ /dev/null
@@ -1,795 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
-import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
-import com.usatiuk.dhfs.utils.VoidFn;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Shutdown;
-import io.quarkus.runtime.Startup;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-import lombok.Getter;
-import org.apache.commons.collections4.MultiValuedMap;
-import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-@Singleton
-public class JObjectManagerImpl implements JObjectManager {
-    private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _writeListeners
-            = new ArrayListValuedHashMap<>();
-    private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _metaWriteListeners
-            = new ArrayListValuedHashMap<>();
-    private final ConcurrentHashMap<String, NamedWeakReference> _map = new ConcurrentHashMap<>();
-    private final ReferenceQueue<JObjectImpl<?>> _refQueue = new ReferenceQueue<>();
-    @Inject
-    ObjectPersistentStore objectPersistentStore;
-    @Inject
-    RemoteObjectServiceClient remoteObjectServiceClient;
-    @Inject
-    InvalidationQueueService invalidationQueueService;
-    @Inject
-    PersistentPeerDataService persistentPeerDataService;
-    @Inject
-    JObjectRefProcessor jObjectRefProcessor;
-    @Inject
-    SoftJObjectFactory softJObjectFactory;
-    @Inject
-    JObjectLRU jObjectLRU;
-    @Inject
-    JObjectTxManager jObjectTxManager;
-    @Inject
-    TxWriteback txWriteback;
-
-    @Inject
-    ProtoSerializer<ObjectMetadataP, ObjectMetadata> metaProtoSerializer;
-    @Inject
-    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
-
-    @ConfigProperty(name = "dhfs.objects.ref_verification")
-    boolean refVerification;
-    @ConfigProperty(name = "dhfs.objects.lock_timeout_secs")
-    int lockTimeoutSecs;
-    private Thread _refCleanupThread;
-
-    @Override
-    public void runWriteListeners(JObject<?> obj, boolean metaChanged, boolean dataChanged) {
-        if (metaChanged)
-            for (var t : _metaWriteListeners.keySet()) { // FIXME:?
-                if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
-                    for (var cb : _metaWriteListeners.get(t))
-                        cb.apply(obj);
-            }
-        if (dataChanged)
-            for (var t : _writeListeners.keySet()) { // FIXME:?
-                if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
-                    for (var cb : _writeListeners.get(t))
-                        cb.apply(obj);
-            }
-    }
-
-    @Override
-    public <T extends JObjectData> void registerWriteListener(Class<T> klass, WriteListenerFn<T> fn) {
-        _writeListeners.put(klass, fn);
-    }
-
-    @Override
-    public <T extends JObjectData> void registerMetaWriteListener(Class<T> klass, WriteListenerFn<T> fn) {
-        _metaWriteListeners.put(klass, fn);
-    }
-
-    @Startup
-    void init() {
-        _refCleanupThread = new Thread(this::refCleanupThread);
-        _refCleanupThread.setName("JObject ref cleanup thread");
-        _refCleanupThread.start();
-    }
-
-    @Shutdown
-    void shutdown() throws InterruptedException {
-        _refCleanupThread.interrupt();
-        _refCleanupThread.join();
-    }
-
-    private void refCleanupThread() {
-        try {
-            while (!Thread.interrupted()) {
-                NamedWeakReference cur = (NamedWeakReference) _refQueue.remove();
-                _map.remove(cur._key, cur);
-            }
-        } catch (InterruptedException ignored) {
-        }
-        Log.info("Ref cleanup thread exiting");
-    }
-
-    private JObjectImpl<?> getFromMap(String key) {
-        var ret = _map.get(key);
-        if (ret != null && ret.get() != null) {
-            return ret.get();
-        }
-        return null;
-    }
-
-    @Override
-    public Optional<JObject<?>> get(String name) {
-        {
-            var inMap = getFromMap(name);
-            if (inMap != null) {
-                jObjectLRU.notifyAccess(inMap);
-                return Optional.of(inMap);
-            }
-        }
-
-        ObjectMetadataP readMd;
-        try {
-            readMd = objectPersistentStore.readObjectMeta(name);
-        } catch (StatusRuntimeException ex) {
-            if (ex.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
-                return Optional.empty();
-            throw ex;
-        }
-        var meta = metaProtoSerializer.deserialize(readMd);
-        if (!(meta instanceof ObjectMetadata))
-            throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Unexpected metadata type for " + name));
-
-        if (((ObjectMetadata) meta).isDeleted()) {
-            Log.warn("Deleted meta on disk for " + name);
-            return Optional.empty();
-        }
-
-        JObjectImpl<?> ret = null;
-        var newObj = new JObjectImpl<>((ObjectMetadata) meta);
-        while (ret == null) {
-            var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(newObj, _refQueue));
-            if (ref.get() == null) _map.remove(name, ref);
-            else ret = ref.get();
-        }
-        jObjectLRU.notifyAccess(ret);
-        return Optional.of(ret);
-    }
-
-    @Override
-    public Collection<String> findAll() {
-        var out = _map.values().stream().map(WeakReference::get)
-                .filter(Objects::nonNull)
-                .map(JObjectImpl::getMeta).map(ObjectMetadata::getName)
-                .collect(Collectors.toCollection((Supplier<LinkedHashSet<String>>) LinkedHashSet::new));
-        out.addAll(objectPersistentStore.findAllObjects());
-        return out;
-    }
-
-    public <D extends JObjectData> JObjectImpl<D> putImpl(D object, Optional<String> parent, boolean lock) {
-        while (true) {
-            JObjectImpl<?> ret;
-            JObjectImpl<D> newObj = null;
-            try {
-                ret = getFromMap(object.getName());
-                if (ret != null) {
-                    if (!object.getClass().isAnnotationPresent(AssumedUnique.class))
-                        throw new IllegalArgumentException("Trying to insert different object with same key");
-                } else {
-                    newObj = new JObjectImpl<>(object.getName(), persistentPeerDataService.getSelfUuid(), object);
-                    newObj.rwLock();
-                    while (ret == null) {
-                        JObjectImpl<D> finalNewObj = newObj;
-                        var ref = _map.computeIfAbsent(object.getName(), k -> new NamedWeakReference(finalNewObj, _refQueue));
-                        if (ref.get() == null) _map.remove(object.getName(), ref);
-                        else ret = ref.get();
-                    }
-                    if (ret != newObj) {
-                        newObj.drop();
-                        continue;
-                    }
-                }
-                JObjectImpl<D> finalRet = (JObjectImpl<D>) ret;
-
-                boolean shouldWrite = false;
-                try {
-                    shouldWrite = ret.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
-                        return (object.getClass().isAnnotationPresent(PushResolution.class)
-                                && object.getClass().isAnnotationPresent(AssumedUnique.class)
-                                && finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy())
-                                || (parent.isEmpty() && !m.isFrozen()) || (parent.isPresent() && !m.checkRef(parent.get()));
-                    });
-                } catch (DeletedObjectAccessException dex) {
-                    shouldWrite = true;
-                }
-
-                if (shouldWrite)
-                    ret.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
-                        if (object.getClass().isAnnotationPresent(PushResolution.class)
-                                && object.getClass().isAnnotationPresent(AssumedUnique.class)
-                                && finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy()) {
-                            finalRet.externalResolution(object);
-                        }
-
-                        if (parent.isPresent()) {
-                            m.addRef(parent.get());
-                            if (m.isFrozen())
-                                m.unfreeze();
-                        } else {
-                            m.freeze();
-                        }
-
-                        return null;
-                    });
-            } finally {
-                // FIXME?
-                if (newObj != null)
-                    newObj.forceInvalidate();
-            }
-            if (newObj == null) {
-                jObjectLRU.notifyAccess(ret);
-                if (lock)
-                    ret.rwLock();
-            }
-            if (newObj != null && !lock)
-                newObj.rwUnlock();
-            return (JObjectImpl<D>) ret;
-        }
-    }
-
-    @Override
-    public <D extends JObjectData> JObjectImpl<D> putLocked(D object, Optional<String> parent) {
-        return putImpl(object, parent, true);
-    }
-
-    @Override
-    public <D extends JObjectData> JObjectImpl<D> put(D object, Optional<String> parent) {
-        return putImpl(object, parent, false);
-    }
-
-    public JObject<?> getOrPutImpl(String name, Class<? extends JObjectData> klass, Optional<String> parent, boolean lock) {
-        while (true) {
-            var got = get(name).orElse(null);
-
-            if (got != null) {
-                {
-                    boolean shouldWrite = false;
-                    try {
-                        // These two mutate in one direction only, it's ok to not take the lock
-                        var gotKlass = got.getMeta().getKnownClass();
-                        var gotSeen = got.getMeta().isSeen();
-                        shouldWrite
-                                = !(((gotKlass.equals(klass))
-                                || (klass.isAssignableFrom(gotKlass)))
-                                && gotSeen);
-                    } catch (DeletedObjectAccessException dex) {
-                        shouldWrite = true;
-                    }
-                    if (shouldWrite || lock) {
-                        got.rwLock();
-                        try {
-                            var meta = got.getMeta();
-                            meta.narrowClass(klass);
-                            meta.markSeen();
-                        } finally {
-                            if (!lock) got.rwUnlock();
-                        }
-                    }
-                }
-
-                parent.ifPresent(s -> {
-                    boolean shouldWrite = false;
-                    try {
-                        shouldWrite = !got.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> m.checkRef(s));
-                    } catch (DeletedObjectAccessException dex) {
-                        shouldWrite = true;
-                    }
-
-                    if (!shouldWrite) return;
-
-                    got.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
-                        if (m.isFrozen())
-                            m.unfreeze();
-                        m.addRef(s);
-                        return true;
-                    });
-                });
-                return got;
-            }
-
-            JObjectImpl<?> ret = null;
-            var created = new JObjectImpl<>(new ObjectMetadata(name, false, klass));
-            created.rwLock();
-            while (ret == null) {
-                var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(created, _refQueue));
-                if (ref.get() == null) _map.remove(name, ref);
-                else ret = ref.get();
-            }
-            if (ret != created) {
-                created.drop();
-                continue;
-            }
-
-            created.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
-                parent.ifPresent(m::addRef);
-                m.markSeen();
-                return null;
-            });
-            if (!lock)
-                created.rwUnlock();
-            return created;
-        }
-    }
-
-    @Override
-    public JObject<?> getOrPutLocked(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
-        return getOrPutImpl(name, klass, parent, true);
-    }
-
-    @Override
-    public JObject<?> getOrPut(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
-        return getOrPutImpl(name, klass, parent, false);
-    }
-
-    private static class NamedWeakReference extends WeakReference<JObjectImpl<?>> {
-        @Getter
-        final String _key;
-
-        public NamedWeakReference(JObjectImpl<?> target, ReferenceQueue<JObjectImpl<?>> q) {
-            super(target, q);
-            this._key = target.getMeta().getName();
-        }
-    }
-
-    public class JObjectImpl<T extends JObjectData> extends JObject<T> {
-        private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
-        private final AtomicReference<T> _dataPart = new AtomicReference<>();
-        private ObjectMetadata _metaPart;
-
-        // Create a new object
-        protected JObjectImpl(String name, UUID selfUuid, T obj) {
-            _metaPart = new ObjectMetadata(name, false, obj.getClass());
-            _metaPart.setHaveLocalCopy(true);
-            _dataPart.set(obj);
-            _metaPart.getChangelog().put(selfUuid, 1L);
-            if (Log.isTraceEnabled())
-                Log.trace("new JObject: " + getMeta().getName());
-        }
-
-        // Create an object from existing metadata
-        protected JObjectImpl(ObjectMetadata objectMetadata) {
-            _metaPart = objectMetadata;
-            if (Log.isTraceEnabled())
-                Log.trace("new JObject (ext): " + getMeta().getName());
-        }
-
-        @Override
-        public T getData() {
-            return _dataPart.get();
-        }
-
-        @Override
-        void rollback(ObjectMetadata meta, JObjectData data) {
-            _metaPart = meta;
-            _dataPart.set((T) data);
-        }
-
-        @Override
-        public ObjectMetadata getMeta() {
-            return _metaPart;
-        }
-
-        @Override
-        public void markSeen() {
-            if (!_metaPart.isSeen()) {
-                runWriteLocked(ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
-                    m.markSeen();
-                    return null;
-                });
-            }
-        }
-
-        private void tryRemoteResolve() {
-            if (_dataPart.get() == null) {
-                rwLock();
-                try {
-                    tryLocalResolve();
-                    if (_dataPart.get() == null) {
-                        var res = resolveDataRemote();
-                        _metaPart.narrowClass(res.getClass());
-                        _dataPart.set((T) res);
-                        _metaPart.setHaveLocalCopy(true);
-                        hydrateRefs();
-                    } // _dataPart.get() == null
-                } finally {
-                    rwUnlock();
-                } // try
-            } // _dataPart.get() == null
-        }
-
-        private void tryLocalResolve() {
-            if (_dataPart.get() == null) {
-                rLock();
-                try {
-                    if (_dataPart.get() == null) {
-                        if (!getMeta().isHaveLocalCopy()) return;
-                        JObjectData res;
-                        try {
-                            res = resolveDataLocal();
-                        } catch (Exception e) {
-                            Log.error("Object " + _metaPart.getName() + " data couldn't be read but it should exist locally!", e);
-                            return;
-                        }
-
-                        if (_metaPart.getSavedRefs() != null && !_metaPart.getSavedRefs().isEmpty())
-                            throw new IllegalStateException("Object " + _metaPart.getName() + " has non-hydrated refs when written locally");
-
-                        _metaPart.narrowClass(res.getClass());
-                        if (_dataPart.compareAndSet(null, (T) res))
-                            onResolution();
-                    } // _dataPart.get() == null
-                } finally {
-                    rUnlock();
-                } // try
-            } // _dataPart.get() == null
-        }
-
-        @Override
-        public void externalResolution(JObjectData data) {
-            assertRwLock();
-            if (Log.isTraceEnabled())
-                Log.trace("External resolution of " + getMeta().getName());
-            if (_dataPart.get() != null)
-                throw new IllegalStateException("Data is not null when recording external resolution of " + getMeta().getName());
-            if (!data.getClass().isAnnotationPresent(PushResolution.class))
-                throw new IllegalStateException("Expected external resolution only for classes with pushResolution " + getMeta().getName());
-            _metaPart.narrowClass(data.getClass());
-            _dataPart.set((T) data);
-            _metaPart.setHaveLocalCopy(true);
-            hydrateRefs();
-        }
-
-        public boolean tryRLock() {
-            try {
-                if (!_lock.readLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
-                    return false;
-                if (_metaPart.isDeleted()) {
-                    _lock.readLock().unlock();
-                    throw new DeletedObjectAccessException();
-                }
-                return true;
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        boolean tryRwLockImpl(boolean block, boolean txCopy) {
-            try {
-                if (block) {
-                    if (!_lock.writeLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
-                        return false;
-                } else {
-                    if (!_lock.writeLock().tryLock())
-                        return false;
-                }
-                try {
-                    // TODO: Fix putImpl
-//                    if (_metaPart.isDeleted())
-//                        throw new DeletedObjectAccessException();
-
-                    if (_lock.writeLock().getHoldCount() == 1) {
-                        jObjectTxManager.addToTx(this, txCopy);
-                    }
-                } catch (Throwable t) {
-                    _lock.writeLock().unlock();
-                    throw t;
-                }
-                return true;
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        @Override
-        public void rwLock() {
-            if (!tryRwLockImpl(true, true))
-                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
-        }
-
-        @Override
-        public boolean tryRwLock() {
-            return tryRwLockImpl(false, true);
-        }
-
-        @Override
-        public void rwLockNoCopy() {
-            if (!tryRwLockImpl(true, false))
-                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
-        }
-
-        public void rLock() {
-            if (!tryRLock())
-                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire read lock for " + getMeta().getName()));
-        }
-
-        public void rUnlock() {
-            _lock.readLock().unlock();
-        }
-
-        protected void forceInvalidate() {
-            assertRwLock();
-            jObjectTxManager.forceInvalidate(this);
-        }
-
-        public void rwUnlock() {
-            int hc = _lock.writeLock().getHoldCount();
-
-            _lock.writeLock().unlock();
-
-            // FIXME: this relies on the transaction running
-            if (hc == 2) {
-                updateDeletionState();
-            }
-        }
-
-        @Override
-        public void drop() {
-            if (_lock.writeLock().getHoldCount() < 2) {
-                throw new IllegalStateException("Expected for object to be locked and in transaction");
-            }
-            _lock.writeLock().unlock();
-            jObjectTxManager.drop(this);
-        }
-
-        public boolean haveRwLock() {
-            return _lock.isWriteLockedByCurrentThread();
-        }
-
-        @Override
-        public void assertRwLock() {
-            if (!haveRwLock())
-                throw new IllegalStateException("Expected to be write-locked there: " + getMeta().getName() + " " + Thread.currentThread().getName());
-        }
-
-        @Override
-        public <R> R runReadLocked(ResolutionStrategy resolutionStrategy, ObjectFnRead<T, R> fn) {
-            tryResolve(resolutionStrategy);
-
-            rLock();
-            try {
-                return fn.apply(_metaPart, _dataPart.get());
-            } finally {
-                rUnlock();
-            }
-        }
-
-        protected boolean isResolved() {
-            return _dataPart.get() != null;
-        }
-
-        @Override
-        public <R> R runWriteLocked(ResolutionStrategy resolutionStrategy, ObjectFnWrite<T, R> fn) {
-            rwLock();
-            try {
-                tryResolve(resolutionStrategy);
-                VoidFn invalidateFn = () -> {
-                    tryLocalResolve();
-                    backupRefs();
-                    _dataPart.set(null);
-                    removeLocal(_metaPart.getName());
-                };
-                return fn.apply(_metaPart, _dataPart.get(), this::bumpVer, invalidateFn);
-            } finally {
-                rwUnlock();
-            }
-        }
-
-        @Override
-        public void mutate(JMutator<T> mutator) {
-            assertRwLock();
-
-            if (getData() == null) throw new IllegalStateException("Resolve before mutate!");
-
-            if (mutator.mutate(getData())) {
-                bumpVer();
-                jObjectTxManager.addMutator(this, mutator);
-            }
-        }
-
-        public boolean tryResolve(ResolutionStrategy resolutionStrategy) {
-            if (resolutionStrategy == ResolutionStrategy.LOCAL_ONLY ||
-                    resolutionStrategy == ResolutionStrategy.REMOTE)
-                tryLocalResolve();
-            if (resolutionStrategy == ResolutionStrategy.REMOTE) tryRemoteResolve();
-
-            return _dataPart.get() != null;
-        }
-
-        @Override
-        public void doDelete() {
-            assertRwLock();
-            getMeta().markDeleted();
-            _dataPart.set(null);
-            _metaPart.setHaveLocalCopy(false);
-            _metaPart.setSavedRefs(new HashSet<>());
-        }
-
-        public void backupRefs() {
-            assertRwLock();
-            if (getData() != null) {
-                if ((getMeta().getSavedRefs() != null) && (!getMeta().getSavedRefs().isEmpty())) {
-                    Log.error("Saved refs not empty for " + getMeta().getName() + " will clean");
-                    getMeta().setSavedRefs(null);
-                }
-                getMeta().setSavedRefs(new LinkedHashSet<>(getData().extractRefs()));
-            }
-        }
-
-        public void hydrateRefs() {
-            assertRwLock();
-            if (getMeta().getSavedRefs() != null) {
-                StringBuilder sb = new StringBuilder();
-                sb.append("Hydrating refs for ").append(getMeta().getName()).append("\n");
-                sb.append("Saved refs: ");
-                getMeta().getSavedRefs().forEach(r -> sb.append(r).append(" "));
-                sb.append("\nExtracted refs: ");
-                var extracted = new LinkedHashSet<>(getData().extractRefs());
-                extracted.forEach(r -> sb.append(r).append(" "));
-                Log.debug(sb.toString());
-                for (var r : getMeta().getSavedRefs()) {
-                    if (!extracted.contains(r))
-                        get(r).ifPresent(ro -> ro.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
-                            m.removeRef(getMeta().getName());
-                            return null;
-                        }));
-                }
-                for (var r : extracted) {
-                    if (!getMeta().getSavedRefs().contains(r)) {
-                        Log.trace("Hydrating ref " + r + " for " + getMeta().getName());
-                        getOrPut(r, getData().getRefType(), Optional.of(getMeta().getName()));
-                    }
-                }
-                getMeta().setSavedRefs(null);
-            }
-        }
-
-        @Override
-        public boolean updateDeletionState() {
-            assertRwLock();
-
-            if (!getMeta().isDeletionCandidate() && getMeta().isDeleted()) {
-                getMeta().undelete();
-                Log.debug("Undelete: " + getMeta().getName());
-
-                Stream<String> refs = Stream.empty();
-
-                if (getMeta().getSavedRefs() != null)
-                    refs = getMeta().getSavedRefs().stream();
-                if (getData() != null)
-                    refs = Stream.concat(refs, getData().extractRefs().stream());
-
-                refs.forEach(r -> {
-                    Log.trace("Hydrating ref after undelete " + r + " for " + getMeta().getName());
-                    getOrPut(r, getData() != null ? getData().getRefType() : JObjectData.class, Optional.of(getMeta().getName()));
-                });
-
-            }
-
-            if (getMeta().isDeletionCandidate() && !getMeta().isDeleted()) {
-                if (!getMeta().isSeen())
-                    tryQuickDelete();
-                else
-                    jObjectRefProcessor.putDeletionCandidate(getMeta().getName());
-                return true;
-            }
-            return false;
-        }
-
-        private void quickDeleteRef(String name) {
-            var got = get(name).orElse(null);
-            if (got == null) return;
-            if (got.tryRwLock()) {
-                try {
-                    got.getMeta().removeRef(getMeta().getName());
-                } finally {
-                    got.rwUnlock();
-                }
-            } else {
-                jObjectRefProcessor.putQuickDeletionCandidate(softJObjectFactory.create(JObjectData.class, this), softJObjectFactory.create(JObjectData.class, got));
-            }
-        }
-
-        private void tryQuickDelete() {
-            assertRwLock();
-            if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class))
-                tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
-            if (Log.isTraceEnabled())
-                Log.trace("Quick delete of: " + getMeta().getName());
-
-            Collection<String> extracted = null;
-            if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && getData() != null)
-                extracted = getData().extractRefs();
-            Collection<String> saved = getMeta().getSavedRefs();
-
-            doDelete();
-
-            if (saved != null)
-                for (var r : saved) quickDeleteRef(r);
-            if (extracted != null)
-                for (var r : extracted) quickDeleteRef(r);
-        }
-
-        public T resolveDataLocal() {
-            // jObject.assertRwLock();
-            // FIXME: No way to assert read lock?
-            return (T) dataProtoSerializer.deserialize(objectPersistentStore.readObject(getMeta().getName()));
-        }
-
-        public T resolveDataRemote() {
-            var obj = remoteObjectServiceClient.getObject(this);
-            invalidationQueueService.pushInvalidationToAll(this);
-            return (T) dataProtoSerializer.deserialize(obj);
-        }
-
-        // Really more like "onUpdateSize"
-        // Also not called from tryResolveRemote/externalResolution because
-        // there it's handled by the notifyWrite
-        public void onResolution() {
-            jObjectLRU.updateSize(this);
-        }
-
-        public void removeLocal(String name) {
-            assertRwLock();
-            try {
-                Log.debug("Invalidating " + name);
-                getMeta().setHaveLocalCopy(false);
-            } catch (StatusRuntimeException sx) {
-                if (sx.getStatus() != Status.NOT_FOUND)
-                    Log.info("Couldn't delete object from persistent store: ", sx);
-            } catch (Exception e) {
-                Log.info("Couldn't delete object from persistent store: ", e);
-            }
-        }
-
-        @Override
-        public void bumpVer() {
-            assertRwLock();
-            getMeta().bumpVersion(persistentPeerDataService.getSelfUuid());
-        }
-
-        @Override
-        public void commitFence() {
-            if (haveRwLock())
-                throw new IllegalStateException("Waiting on object flush inside transaction?");
-            if (getMeta().getLastModifiedTx() == -1) return;
-            txWriteback.fence(getMeta().getLastModifiedTx());
-        }
-
-        @Override
-        public void commitFenceAsync(VoidFn callback) {
-            if (haveRwLock())
-                throw new IllegalStateException("Waiting on object flush inside transaction?");
-            if (getMeta().getLastModifiedTx() == -1) {
-                callback.apply();
-                return;
-            }
-            txWriteback.asyncFence(getMeta().getLastModifiedTx(), callback);
-        }
-
-        @Override
-        public int estimateSize() {
-            if (_dataPart.get() == null) return 1024; // Assume metadata etc takes up something
-            else return _dataPart.get().estimateSize() + 1024;
-        }
-    }
-}
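
One pattern worth noting in JObjectImpl above: read locks are acquired with a timeout, and the deletion flag is re-checked under the lock so a reader can never observe a deleted object. A minimal sketch of that guard; the class and field names here are illustrative, not the real ones:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class GuardedObject {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile boolean deleted = false;

    boolean tryRLock(long timeoutSecs) throws InterruptedException {
        if (!lock.readLock().tryLock(timeoutSecs, TimeUnit.SECONDS))
            return false; // Contended past the timeout: let the caller fail fast
        if (deleted) {
            // Never hand out a read lock on a deleted object
            lock.readLock().unlock();
            throw new IllegalStateException("deleted object access");
        }
        return true;
    }
}

Checking the flag only after the lock is held is what makes the guard race-free: a concurrent deleter must hold the write lock to flip the flag, so the reader either sees the flag set or holds a valid read lock first.
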
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java
deleted file mode 100644
index 5de25357..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java
+++ /dev/null
@@ -1,282 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
-import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor;
-import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.concurrent.BasicThreadFactory;
-import org.apache.commons.lang3.tuple.Pair;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-@ApplicationScoped
-public class JObjectRefProcessor {
-    private final HashSetDelayedBlockingQueue<Pair<SoftJObject<?>, SoftJObject<?>>> _quickCandidates = new HashSetDelayedBlockingQueue<>(0);
-    private final HashSetDelayedBlockingQueue<String> _candidates;
-    private final HashSetDelayedBlockingQueue<String> _canDeleteRetries;
-    private final HashSet<String> _movablesInProcessing = new HashSet<>();
-    @Inject
-    JObjectManager jObjectManager;
-    @Inject
-    PersistentPeerDataService persistentPeerDataService;
-    @Inject
-    RemoteObjectServiceClient remoteObjectServiceClient;
-    @Inject
-    AutoSyncProcessor autoSyncProcessor;
-    @Inject
-    JObjectTxManager jObjectTxManager;
-    @ConfigProperty(name = "dhfs.objects.move-processor.threads")
-    int moveProcessorThreads;
-    @ConfigProperty(name = "dhfs.objects.ref-processor.threads")
-    int refProcessorThreads;
-    @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay")
-    long canDeleteRetryDelay;
-    @Inject
-    ExecutorService executorService;
-
-    private ExecutorService _movableProcessorExecutorService;
-    private ExecutorService _refProcessorExecutorService;
-
-    public JObjectRefProcessor(@ConfigProperty(name = "dhfs.objects.deletion.delay") long deletionDelay,
-                               @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") long canDeleteRetryDelay) {
-        _candidates = new HashSetDelayedBlockingQueue<>(deletionDelay);
-        _canDeleteRetries = new HashSetDelayedBlockingQueue<>(canDeleteRetryDelay);
-    }
-
-    void init(@Observes @Priority(200) StartupEvent event) throws IOException {
-        BasicThreadFactory factory = new BasicThreadFactory.Builder()
-                .namingPattern("move-proc-%d")
-                .build();
-        _movableProcessorExecutorService = Executors.newFixedThreadPool(moveProcessorThreads, factory);
-
-        BasicThreadFactory factoryRef = new BasicThreadFactory.Builder()
-                .namingPattern("ref-proc-%d")
-                .build();
-        _refProcessorExecutorService = Executors.newFixedThreadPool(refProcessorThreads, factoryRef);
-        for (int i = 0; i < refProcessorThreads; i++) {
-            _refProcessorExecutorService.submit(this::refProcessor);
-        }
-
-        // Continue GC from last shutdown
-        //FIXME
-//        executorService.submit(() ->
-//                jObjectManager.findAll().forEach(n -> {
-//                    jObjectManager.get(n).ifPresent(o -> o.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
-//                        return null;
-//                    }));
-//                }));
-    }
-
-    void shutdown(@Observes @Priority(800) ShutdownEvent event) throws InterruptedException {
-        _refProcessorExecutorService.shutdownNow();
-        if (!_refProcessorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
-            Log.error("Refcounting threads didn't exit in 30 seconds");
-        }
-    }
-
-    public void putQuickDeletionCandidate(SoftJObject<?> from, SoftJObject<?> obj) {
-        _quickCandidates.add(Pair.of(from, obj));
-    }
-
-    public void putDeletionCandidate(String name) {
-        synchronized (_movablesInProcessing) {
-            if (_movablesInProcessing.contains(name)) return;
-            if (_candidates.add(name))
-                Log.debug("Deletion candidate: " + name);
-        }
-    }
-
-    private void asyncProcessMovable(String objName) {
-        synchronized (_movablesInProcessing) {
-            if (_movablesInProcessing.contains(objName)) return;
-            _movablesInProcessing.add(objName);
-        }
-
-        _movableProcessorExecutorService.submit(() -> {
-            var obj = jObjectManager.get(objName).orElse(null);
-            if (obj == null || obj.getMeta().isDeleted()) return;
-            boolean delay = false;
-            try {
-                var knownHosts = persistentPeerDataService.getHostUuids();
-                List<UUID> missing = new ArrayList<>();
-
-                var ourReferrers = obj.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
-                    for (var x : knownHosts)
-                        if (!m.getConfirmedDeletes().contains(x)) missing.add(x);
-                    return m.getReferrers();
-                });
-                var ret = remoteObjectServiceClient.canDelete(missing, obj.getMeta().getName(), ourReferrers);
-
-                long ok = 0;
-
-                for (var r : ret) {
-                    if (!r.getDeletionCandidate())
-                        for (var rr : r.getReferrersList())
-                            autoSyncProcessor.add(rr);
-                    else
-                        ok++;
-                }
-
(ok != missing.size()) { - Log.debug("Delaying deletion check of " + obj.getMeta().getName()); - delay = true; - } - - jObjectTxManager.executeTx(() -> { - obj.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> { - for (var r : ret) - if (r.getDeletionCandidate()) - m.getConfirmedDeletes().add(UUID.fromString(r.getSelfUuid())); - return null; - }); - }); - } catch (Exception e) { - Log.warn("When processing deletion of movable object " + obj.getMeta().getName(), e); - } finally { - synchronized (_movablesInProcessing) { - _movablesInProcessing.remove(obj.getMeta().getName()); - if (!delay) - _candidates.add(obj.getMeta().getName()); - else - _canDeleteRetries.add(obj.getMeta().getName()); - } - } - }); - } - - private boolean processMovable(JObject obj) { - obj.assertRwLock(); - var knownHosts = persistentPeerDataService.getHostUuids(); - boolean missing = false; - for (var x : knownHosts) - if (!obj.getMeta().getConfirmedDeletes().contains(x)) { - missing = true; - break; - } - - if (!missing) return true; - asyncProcessMovable(obj.getMeta().getName()); - return false; - } - - private void deleteRef(JObject self, String name) { - jObjectManager.get(name).ifPresent(ref -> ref.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, dc, bc, ic) -> { - mc.removeRef(self.getMeta().getName()); - return null; - })); - } - - private void refProcessor() { - try { - while (!Thread.interrupted()) { - String next = null; - Pair, SoftJObject> nextQuick = null; - - while (next == null && nextQuick == null) { - nextQuick = _quickCandidates.tryGet(); - - if (nextQuick != null) break; - - next = _canDeleteRetries.tryGet(); - if (next == null) - next = _candidates.tryGet(); - if (next == null) - nextQuick = _quickCandidates.get(canDeleteRetryDelay); - } - - JObject target; - - if (nextQuick != null) { - var fromSoft = nextQuick.getLeft(); - var toSoft = nextQuick.getRight(); - - var from = nextQuick.getLeft().get(); - var to = nextQuick.getRight().get(); - - if (from != null && !from.getMeta().isDeleted()) { - Log.warn("Quick delete failed for " + from.getMeta().getName() + " -> " + toSoft.getName()); - continue; - } - - if (to == null) { - Log.warn("Quick delete object missing: " + toSoft.getName()); - continue; - } - - target = to; - - jObjectTxManager.executeTx(() -> { - if (from != null) - from.rwLock(); - try { - try { - to.rwLock(); - to.getMeta().removeRef(fromSoft.getName()); - } finally { - to.rwUnlock(); - } - } finally { - if (from != null) - from.rwUnlock(); - } - }); - } else { - target = jObjectManager.get(next).orElse(null); - } - - if (target == null) continue; - try { - jObjectTxManager.executeTx(() -> { - target.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, v, i) -> { - if (m.isFrozen()) return null; - if (m.isDeleted()) return null; - if (!m.isDeletionCandidate()) return null; - if (m.isSeen() && !m.isOnlyLocal()) { - if (!processMovable(target)) - return null; - } - if (m.isSeen() && m.isOnlyLocal()) - Log.warn("Seen only-local object: " + m.getName()); - - - if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class)) - target.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - - Log.debug("Deleting " + m.getName()); - - Collection extracted = null; - if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && target.getData() != null) - extracted = target.getData().extractRefs(); - Collection saved = target.getMeta().getSavedRefs(); - - target.doDelete(); - - if (saved != null) - for (var 
r : saved) deleteRef(target, r); - if (extracted != null) - for (var r : extracted) deleteRef(target, r); - - return null; - }); - }); - } catch (Exception ex) { - Log.error("Error when deleting: " + next, ex); - } - } - } catch (InterruptedException ignored) { - } - Log.info("JObject Refcounter thread exiting"); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java deleted file mode 100644 index 8bf62ef0..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; - -public record JObjectSnapshot - (ObjectMetadataP meta, - JObjectDataP data, - int changelogHash) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java deleted file mode 100644 index 3634f3a2..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java +++ /dev/null @@ -1,397 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; -import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.utils.VoidFn; -import io.quarkus.logging.Log; -import jakarta.annotation.Nullable; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; -import java.util.function.Supplier; - -@ApplicationScoped -public class JObjectTxManager { - private final ThreadLocal _state = new ThreadLocal<>(); - private final ExecutorService _serializerThreads; - private final AtomicLong _transientTxId = new AtomicLong(); - @Inject - ProtoSerializer dataProtoSerializer; - @Inject - ProtoSerializer metaProtoSerializer; - @Inject - JObjectLRU jObjectLRU; - @Inject - JObjectManager jObjectManager; - @Inject - InvalidationQueueService invalidationQueueService; - @Inject - TxWriteback txWriteback; - @ConfigProperty(name = "dhfs.objects.ref_verification") - boolean refVerification; - - public JObjectTxManager() { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("tx-serializer-%d") - .build(); - - _serializerThreads = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } - - public void begin() { - if (_state.get() != null) - throw new IllegalStateException("Transaction already running"); - - _state.set(new TxState()); - } - - public void drop(JObject obj) { - var state = _state.get(); - if (state == null) - throw new IllegalStateException("Transaction not running"); - Log.debug("Dropping " + obj.getMeta().getName() + " from " + state._id); - obj.assertRwLock(); - state._writeObjects.remove(obj); - obj.rwUnlock(); - } - - // Returns 
Id of bundle to wait for, or -1 if there was nothing written - public long commit() { - var state = _state.get(); - if (state == null) - throw new IllegalStateException("Transaction not running"); - - if (state._writeObjects.isEmpty()) { -// Log.trace("Empty transaction " + state._id); - state._callbacks.forEach(c -> c.accept(null)); - _state.remove(); - return -1; - } - - Log.debug("Committing transaction " + state._id); - - for (var obj : state._writeObjects.entrySet()) { - Log.debug("Committing " + obj.getKey().getMeta().getName() + " deleted=" + obj.getKey().getMeta().isDeleted() + " deletion-candidate=" + obj.getKey().getMeta().isDeletionCandidate()); - - var dataDiff = obj.getKey().getMeta().changelogHash() != obj.getValue().snapshot.changelogHash() - || obj.getValue()._forceInvalidated; - - if (refVerification) { - // Null check in case of object not being resolved before (though then we can't check this) - boolean dataDiffReal = obj.getValue().snapshot.data() != null - && !Objects.equals(obj.getValue().snapshot.data(), obj.getKey().getData() == null ? null : dataProtoSerializer.serialize(obj.getKey().getData())); - - if (dataDiffReal && !dataDiff) { - var msg = "Data diff not equal for " + obj.getKey().getMeta().getName() + " " + obj.getKey().getData() + " before = " + ((obj.getValue().snapshot != null) ? obj.getValue().snapshot.data() : null) + " after = " + ((obj.getKey().getData() != null) ? dataProtoSerializer.serialize(obj.getKey().getData()) : null); - throw new IllegalStateException(msg); - } - if (dataDiff && !dataDiffReal) - Log.warn("Useless update for " + obj.getKey().getMeta().getName()); - } - -// if (obj.getValue()._copy && !obj.getValue()._mutators.isEmpty()) -// throw new IllegalStateException("Object copied but had mutators!"); - - if (refVerification && !obj.getValue()._copy) { - var cur = dataProtoSerializer.serialize(obj.getKey().getData()); - for (var mut : obj.getValue()._mutators.reversed()) - revertMutator(obj.getKey(), mut); - var rev = dataProtoSerializer.serialize(obj.getKey().getData()); - - if (obj.getValue().snapshot.data() != null && !Objects.equals(rev, obj.getValue().snapshot.data())) - throw new IllegalStateException("Mutator could not be reverted for object " + obj.getKey().getMeta().getName() + "\n old = " + obj.getValue().snapshot.data() + "\n reverted = " + rev + "\n"); - - for (var mut : obj.getValue()._mutators) - applyMutator(obj.getKey(), mut); - - var cur2 = dataProtoSerializer.serialize(obj.getKey().getData()); - if (!Objects.equals(cur, cur2)) - throw new IllegalStateException("Mutator could not be reapplied for object " + obj.getKey().getMeta().getName() + "\n old = " + cur + "\n reapplied = " + cur2 + "\n"); - } - - obj.getValue()._metaSerialized = metaProtoSerializer.serialize(obj.getKey().getMeta()); - obj.getValue()._metaChanged = !Objects.equals(obj.getValue().snapshot.meta(), obj.getValue()._metaSerialized); - obj.getValue()._dataChanged = dataDiff; - - notifyWrite(obj.getKey(), obj.getValue()._metaChanged, dataDiff); - - if (refVerification) { - var oldRefs = obj.getValue().snapshot.data() == null - ? 
null - : ((JObjectData) dataProtoSerializer.deserialize(obj.getValue().snapshot.data())).extractRefs(); - verifyRefs(obj.getKey(), oldRefs); - } - } - - var bundle = txWriteback.createBundle(); - - try { - for (var e : state._writeObjects.entrySet()) { - var key = e.getKey(); - var value = e.getValue(); - if (key.getMeta().isDeleted()) { - bundle.delete(key); - continue; - } - - if (!value._dataChanged && !value._metaChanged) { - continue; - } - - if (key.getMeta().isHaveLocalCopy() && value._dataChanged) { - bundle.commit(key, - value._metaSerialized, - dataProtoSerializer.serialize(key.getData()) - ); - } else if (key.getMeta().isHaveLocalCopy() && !value._dataChanged) { - bundle.commitMetaChange(key, value._metaSerialized); - } else if (!key.getMeta().isHaveLocalCopy()) { - bundle.commit(key, value._metaSerialized, null); - } else { - throw new IllegalStateException("Unexpected object flush combination"); - } - } - } catch (Exception ex) { - Log.error("Error creating tx bundle ", ex); - txWriteback.dropBundle(bundle); - throw ex; - } - - for (var e : state._writeObjects.entrySet()) - e.getKey().getMeta().setLastModifiedTx(bundle.getId()); - - state._writeObjects.forEach((key, value) -> key.rwUnlock()); - - state._callbacks.forEach(s -> txWriteback.asyncFence(bundle.getId(), () -> s.accept(null))); - - txWriteback.commitBundle(bundle); - - _state.remove(); - - return bundle.getId(); - } - - private void notifyWrite(JObject obj, boolean metaChanged, boolean hasDataChanged) { - jObjectLRU.updateSize(obj); - jObjectManager.runWriteListeners(obj, metaChanged, hasDataChanged); - if (hasDataChanged && obj.getMeta().isHaveLocalCopy()) { - invalidationQueueService.pushInvalidationToAll(obj); - } - } - - private void verifyRefs(JObject obj, @Nullable Collection oldRefs) { - if (!refVerification) return; - - if (obj.getData() == null) return; - if (obj.getMeta().isDeleted()) return; - var newRefs = obj.getData().extractRefs(); - if (oldRefs != null) - for (var o : oldRefs) - if (!newRefs.contains(o)) { - jObjectManager.get(o).ifPresent(refObj -> { - if (refObj.getMeta().checkRef(obj.getMeta().getName())) - throw new IllegalStateException("Object " + o + " is referenced from " + obj.getMeta().getName() + " but shouldn't be"); - }); - } - for (var r : newRefs) { - var refObj = jObjectManager.get(r).orElseThrow(() -> new IllegalStateException("Object " + r + " not found but should be referenced from " + obj.getMeta().getName())); - if (refObj.getMeta().isDeleted()) - throw new IllegalStateException("Object " + r + " deleted but referenced from " + obj.getMeta().getName()); - if (!refObj.getMeta().checkRef(obj.getMeta().getName())) - throw new IllegalStateException("Object " + r + " is not referenced by " + obj.getMeta().getName() + " but should be"); - } - } - - private void applyMutator(JObject obj, JMutator mutator) { - mutator.mutate((T) obj.getData()); - } - - private void revertMutator(JObject obj, JMutator mutator) { - mutator.revert((T) obj.getData()); - } - - public void rollback(String message) { - var state = _state.get(); - if (state == null) - throw new IllegalStateException("Transaction not running"); - Log.debug("Rollback of " + state._id); - - for (var obj : state._writeObjects.entrySet()) { - Log.debug("Rollback of " + obj.getKey().getMeta().getName()); - try { - if (obj.getValue()._copy) { - obj.getKey().rollback( - metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()), - obj.getValue().snapshot.data() != null ? 
dataProtoSerializer.deserialize(obj.getValue().snapshot.data()) : null); - } else { - for (var mut : obj.getValue()._mutators.reversed()) - revertMutator(obj.getKey(), mut); - obj.getKey().rollback(metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()), obj.getKey().getData()); - } - obj.getKey().updateDeletionState(); - } finally { - obj.getKey().rwUnlock(); - } - } - - state._callbacks.forEach(c -> c.accept(message != null ? message : "Unknown error")); - Log.debug("Rollback of " + state._id + " done"); - _state.remove(); - } - - public void executeTxAndFlushAsync(VoidFn fn, Consumer callback) { - var state = _state.get(); - if (state != null) { - _state.get()._callbacks.add(callback); - fn.apply(); - return; - } - - begin(); - try { - _state.get()._callbacks.add(callback); - fn.apply(); - commit(); - } catch (Exception e) { - Log.debug("Error in transaction " + _state.get()._id, e); - rollback(e.getMessage()); - throw e; - } - } - - public void executeTxAndFlush(VoidFn fn) { - executeTxAndFlush(() -> { - fn.apply(); - return null; - }); - } - - public T executeTxAndFlush(Supplier fn) { - if (_state.get() != null) { - throw new IllegalStateException("Can't wait for transaction to flush from non-top-level tx"); - } - - begin(); - try { - var ret = fn.get(); - var bundleId = commit(); - if (bundleId != -1) - txWriteback.fence(bundleId); - return ret; - } catch (Exception e) { - Log.debug("Error in transaction " + _state.get()._id, e); - rollback(e.getMessage()); - throw e; - } - } - - public void executeTx(VoidFn fn) { - executeTx(() -> { - fn.apply(); - return null; - }); - } - - public T executeTx(Supplier fn) { - if (_state.get() != null) { - return fn.get(); - } - - begin(); - try { - var ret = fn.get(); - commit(); - return ret; - } catch (Exception e) { - Log.debug("Error in transaction " + _state.get()._id, e); - rollback(e.getMessage()); - throw e; - } - } - - public void forceInvalidate(JObject obj) { - var state = _state.get(); - - if (state == null) - throw new IllegalStateException("Transaction not running"); - - obj.assertRwLock(); - - var got = state._writeObjects.get(obj); - if (got != null) - got._forceInvalidated = true; - } - - void addToTx(JObject obj, boolean copy) { - var state = _state.get(); - - if (state == null) - throw new IllegalStateException("Transaction not running"); - - Log.debug("Adding " + obj.getMeta().getName() + " to transaction " + state._id); - - obj.assertRwLock(); - obj.rwLock(); - - var snapshot = copy - ? new JObjectSnapshot( - metaProtoSerializer.serialize(obj.getMeta()), - (obj.getData() == null) ? null : dataProtoSerializer.serialize(obj.getData()), - obj.getMeta().changelogHash()) - : new JObjectSnapshot( - metaProtoSerializer.serialize(obj.getMeta()), (!refVerification || (obj.getData() == null)) ? null : dataProtoSerializer.serialize(obj.getData()), - obj.getMeta().changelogHash()); - - state._writeObjects.put(obj, new TxState.TxObjectState(snapshot, copy)); - } - - void addMutator(JObject obj, JMutator mut) { - var state = _state.get(); - - if (state == null) - throw new IllegalStateException("Transaction not running"); - - obj.assertRwLock(); - - //TODO: Asserts for rwLock/rwLockNoCopy? 
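The transaction manager deleted above exposes two entry points with different flushing semantics: executeTx() runs the closure inline if a transaction is already open on the thread, while executeTxAndFlush() refuses to nest and blocks on the writeback fence until the resulting bundle is durable. A hypothetical usage sketch follows; MyData and setValue() are invented stand-ins, the other names come from the code in this diff:

```java
// Hypothetical sketch of the deleted JObjectTxManager call pattern.
// Assumed: interface MyData extends JObjectData { void setValue(int v); }
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;

class TxUsageSketch {
    void update(JObjectTxManager txm, JObject<MyData> obj) {
        // Top-level write: commit() hands back a bundle id internally and
        // executeTxAndFlush() then waits on txWriteback.fence(bundleId).
        txm.executeTxAndFlush(() -> {
            obj.rwLock(); // taking the write lock inside a tx is presumably what enrolls the object (addToTx)
            try {
                obj.getData().setValue(42); // mutate the resolved data (hypothetical setter)
                obj.bumpVer();              // bump our peer's changelog entry
            } finally {
                obj.rwUnlock();
            }
        });

        // Nested-safe variant: reuses the already-open transaction, if any,
        // and otherwise commits without waiting for writeback.
        txm.executeTx(() -> { /* further mutations */ });
    }
}
```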
-
-        var got = state._writeObjects.get(obj);
-        if (got == null)
-            throw new IllegalStateException("Object not in transaction");
-        if (got._copy) {
-            Log.trace("Ignoring mutator for copied object: " + obj.getMeta().getName());
-            return;
-        }
-        got._mutators.addLast(mut);
-    }
-
-    private class TxState {
-        private final long _id = _transientTxId.incrementAndGet();
-        private final HashMap<JObject<?>, TxObjectState> _writeObjects = new HashMap<>();
-        private final ArrayList<Consumer<String>> _callbacks = new ArrayList<>();
-
-        private static class TxObjectState {
-            final JObjectSnapshot snapshot;
-            final List<JMutator<?>> _mutators = new LinkedList<>();
-            final boolean _copy;
-            boolean _forceInvalidated = false;
-            ObjectMetadataP _metaSerialized; // Filled in when committing
-            boolean _metaChanged = false; // Filled in when committing
-            boolean _dataChanged = false; // Filled in when committing
-
-            private TxObjectState(JObjectSnapshot snapshot, boolean copy) {
-                this.snapshot = snapshot;
-                _copy = copy;
-            }
-        }
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java
deleted file mode 100644
index a6241a56..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/Leaf.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-// Indicates the object never has references
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface Leaf {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java
deleted file mode 100644
index 8441f6e8..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadata.java
+++ /dev/null
@@ -1,196 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.repository.ObjectChangelog;
-import com.usatiuk.dhfs.objects.repository.ObjectChangelogEntry;
-import com.usatiuk.dhfs.objects.repository.ObjectHeader;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import lombok.Getter;
-import lombok.Setter;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.Serial;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicReference;
-
-public class ObjectMetadata {
-    @Getter
-    private final String _name;
-    @Getter
-    private final Map<UUID, Long> _remoteCopies = new LinkedHashMap<>();
-    private final AtomicReference<Class<? extends JObjectData>> _knownClass = new AtomicReference<>();
-    @Getter
-    private final HashSet<UUID> _confirmedDeletes = new LinkedHashSet<>();
-    private final Set<String> _referrers = new LinkedHashSet<>();
-    @Getter
-    private volatile boolean _seen = false;
-    @Getter
-    private volatile boolean _deleted = false;
-    @Getter
-    @Setter
-    private Map<UUID, Long> _changelog = new LinkedHashMap<>(4);
-    @Getter
-    @Setter
-    private Set<String> _savedRefs = Collections.emptySet();
-    @Getter
-    private boolean _frozen = false;
-    @Getter
-    @Setter
-    private volatile boolean _haveLocalCopy = false;
-    @Getter
-    private transient volatile boolean _written = true;
-    @Getter
-    @Setter
-    private long _lastModifiedTx = -1; // -1 if it's already on disk
-
-    public
ObjectMetadata(String name, boolean written, Class knownClass) { - _name = name; - _written = written; - _knownClass.set(knownClass); - } - - public Class getKnownClass() { - return _knownClass.get(); - } - - protected void narrowClass(Class klass) { - Class got = null; - do { - got = _knownClass.get(); - if (got.equals(klass)) return; - if (klass.isAssignableFrom(got)) return; - if (!got.isAssignableFrom(klass)) - throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Could not narrow class of object " + getName() + " from " + got + " to " + klass)); - } while (!_knownClass.compareAndSet(got, klass)); - } - - @Serial - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - _written = true; - } - - public void markSeen() { - Log.trace("Marking seen: " + getName()); - _seen = true; - } - - public void markDeleted() { - _deleted = true; - } - - public void undelete() { - _confirmedDeletes.clear(); - _deleted = false; - } - - public void markWritten() { - _written = true; - } - - // FIXME:? a better way? - public void markUnWritten() { - _written = false; - } - - public boolean isReferred() { - return !_referrers.isEmpty(); - } - - public void freeze() { - if (_frozen) throw new IllegalArgumentException("Already frozen"); - _confirmedDeletes.clear(); - Log.trace("Freezing " + getName()); - _frozen = true; - } - - public void unfreeze() { - if (!_frozen) throw new IllegalArgumentException("Already unfrozen"); - Log.trace("Unfreezing " + getName()); - _frozen = false; - } - - public boolean checkRef(String from) { - return _referrers.contains(from); - } - - public void addRef(String from) { - if (from.equals(getName())) - throw new IllegalArgumentException("Trying to make object refer to itself: " + getName()); - _confirmedDeletes.clear(); - _referrers.add(from); - if (Log.isTraceEnabled()) - Log.trace("Adding ref " + from + " to " + getName()); - } - - public void removeRef(String from) { - if (Log.isTraceEnabled()) - Log.trace("Removing ref " + from + " from " + getName()); - _referrers.remove(from); - } - - public Collection getReferrers() { - return _referrers.stream().toList(); - } - - public Collection getReferrersMutable() { - return _referrers; - } - - public boolean isDeletionCandidate() { - return !isFrozen() && !isReferred(); - } - - public Long getOurVersion() { - return _changelog.values().stream().reduce(0L, Long::sum); - } - - public Long getBestVersion() { - if (_remoteCopies.isEmpty()) return getOurVersion(); - return Math.max(getOurVersion(), _remoteCopies.values().stream().max(Long::compareTo).get()); - } - - public void bumpVersion(UUID selfUuid) { - _changelog.merge(selfUuid, 1L, Long::sum); - } - - public ObjectChangelog toRpcChangelog() { - var changelogBuilder = ObjectChangelog.newBuilder(); - - for (var h : _changelog.entrySet()) { - if (h.getValue() == 0) continue; - var logEntry = ObjectChangelogEntry.newBuilder(); - logEntry.setHost(h.getKey().toString()); - logEntry.setVersion(h.getValue()); - changelogBuilder.addEntries(logEntry.build()); - } - return changelogBuilder.build(); - } - - public ObjectHeader toRpcHeader() { - return toRpcHeader(null); - } - - public ObjectHeader toRpcHeader(JObjectDataP data) { - var headerBuilder = ObjectHeader.newBuilder().setName(getName()); - headerBuilder.setChangelog(toRpcChangelog()); - - if (data != null) - headerBuilder.setPushedData(data); - - return headerBuilder.build(); - } - - public int changelogHash() { - int res = Objects.hashCode(_changelog); 
-        res = 31 * res + Objects.hashCode(_haveLocalCopy);
-        return res;
-    }
-
-    public boolean isOnlyLocal() {
-        return getKnownClass().isAnnotationPresent(OnlyLocal.class);
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java
deleted file mode 100644
index 4d9b6339..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/ObjectMetadataSerializer.java
+++ /dev/null
@@ -1,60 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import jakarta.inject.Singleton;
-
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-@Singleton
-public class ObjectMetadataSerializer implements ProtoSerializer<ObjectMetadataP, ObjectMetadata> {
-    @Override
-    public ObjectMetadataP serialize(ObjectMetadata object) {
-        return ObjectMetadataP.newBuilder()
-                .setName(object.getName())
-                .putAllRemoteCopies(object.getRemoteCopies().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)))
-                .setKnownClass(object.getKnownClass().getName())
-                .setSeen(object.isSeen())
-                .setDeleted(object.isDeleted())
-                .addAllConfirmedDeletes(() -> object.getConfirmedDeletes().stream().map(e -> e.toString()).iterator())
-                .addAllReferrers(object.getReferrers())
-                .putAllChangelog(object.getChangelog().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)))
-                .addAllSavedRefs(object.getSavedRefs() != null ? object.getSavedRefs() : Collections.emptyList())
-                .setFrozen(object.isFrozen())
-                .setHaveLocalCopy(object.isHaveLocalCopy())
-                .build();
-    }
-
-    @Override
-    public ObjectMetadata deserialize(ObjectMetadataP message) {
-        try {
-            var obj = new ObjectMetadata(message.getName(), true,
-                    (Class<? extends JObjectData>) Class.forName(message.getKnownClass(), true, ObjectMetadata.class.getClassLoader()));
-            if (!JObjectData.class.isAssignableFrom(obj.getKnownClass()))
-                throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Class not inherited from JObjectData " + message.getKnownClass()));
-
-            obj.getRemoteCopies().putAll(message.getRemoteCopiesMap().entrySet().stream().collect(Collectors.toMap(e -> UUID.fromString(e.getKey()), Map.Entry::getValue)));
-            if (message.getSeen()) obj.markSeen();
-            if (message.getDeleted()) obj.markDeleted();
-            message.getConfirmedDeletesList().stream().map(UUID::fromString).forEach(o -> obj.getConfirmedDeletes().add(o));
-            obj.getReferrersMutable().addAll(message.getReferrersList());
-            obj.getChangelog().putAll(message.getChangelogMap().entrySet().stream().collect(Collectors.toMap(e -> UUID.fromString(e.getKey()), Map.Entry::getValue)));
-            if (message.getSavedRefsCount() > 0)
-                obj.setSavedRefs(new LinkedHashSet<>(message.getSavedRefsList()));
-            if (message.getFrozen())
-                obj.freeze();
-            if (message.getHaveLocalCopy())
-                obj.setHaveLocalCopy(true);
-
-            return obj;
-        } catch (ClassNotFoundException cx) {
-            throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Could not find class " + message.getKnownClass()));
-        }
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java
deleted file mode 100644
index 441130b6..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/OnlyLocal.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface OnlyLocal {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java
deleted file mode 100644
index d6aaf28a..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/PushResolution.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface PushResolution {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java
deleted file mode 100644
index be321700..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObject.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public interface SoftJObject<T extends JObjectData> {
-    JObject<T> get();
-
-    String getName();
-}
diff --git
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java deleted file mode 100644 index bb5073fb..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/SoftJObjectFactory.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import lombok.NonNull; - -import java.lang.ref.SoftReference; -import java.util.concurrent.atomic.AtomicReference; - -@ApplicationScoped -public class SoftJObjectFactory { - @Inject - JObjectManager jObjectManager; - - public SoftJObject create(Class klass, String name) { - return new SoftJObjectImpl<>(klass, name); - } - - public SoftJObject create(Class klass, JObject obj) { - return new SoftJObjectImpl<>(klass, obj); - } - - private class SoftJObjectImpl implements SoftJObject { - private final Class _klass; - private final String _objName; - private final AtomicReference>> _obj; - - private SoftJObjectImpl(Class klass, @NonNull String objName) { - _klass = klass; - _objName = objName; - _obj = new AtomicReference<>(); - } - - private SoftJObjectImpl(Class klass, JObject obj) { - _klass = klass; - _objName = obj.getMeta().getName(); - _obj = new AtomicReference<>(new SoftReference<>(obj)); - } - - @Override - public JObject get() { - while (true) { - var have = _obj.get(); - if (have != null) { - var ref = have.get(); - if (ref != null) - return ref; - } - var got = jObjectManager.get(_objName).orElse(null); - if (got == null) return null; - var checked = got.as(_klass); - var next = new SoftReference<>(checked); - if (_obj.compareAndSet(have, next)) - return checked; - } - } - - @Override - public String getName() { - return _objName; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - SoftJObjectImpl that = (SoftJObjectImpl) o; - return _objName.equals(that._objName); - } - - @Override - public int hashCode() { - return _objName.hashCode(); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java deleted file mode 100644 index 9c0e089e..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxBundle.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; - -public interface TxBundle { - long getId(); - - void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data); - - void commitMetaChange(JObject obj, ObjectMetadataP meta); - - void delete(JObject obj); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java deleted file mode 100644 index 70a4e60e..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWriteback.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.dhfs.utils.VoidFn; - -public interface TxWriteback { - TxBundle createBundle(); - - void commitBundle(TxBundle bundle); - - void dropBundle(TxBundle bundle); - - 
void fence(long bundleId); - - // Executes callback after bundle with bundleId id has been persisted - // if it was already, runs callback on the caller thread - void asyncFence(long bundleId, VoidFn callback); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java deleted file mode 100644 index ab1b1440..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/TxWritebackImpl.java +++ /dev/null @@ -1,417 +0,0 @@ -package com.usatiuk.dhfs.objects.jrepository; - -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; -import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore; -import com.usatiuk.dhfs.utils.VoidFn; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import lombok.Getter; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.*; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -@ApplicationScoped -public class TxWritebackImpl implements TxWriteback { - private final LinkedList _pendingBundles = new LinkedList<>(); - private final LinkedHashMap _notFlushedBundles = new LinkedHashMap<>(); - - private final Object _flushWaitSynchronizer = new Object(); - private final AtomicLong _lastWrittenTx = new AtomicLong(-1); - private final AtomicLong _counter = new AtomicLong(); - private final AtomicLong _waitedTotal = new AtomicLong(0); - @Inject - ObjectPersistentStore objectPersistentStore; - @ConfigProperty(name = "dhfs.objects.writeback.limit") - long sizeLimit; - private long currentSize = 0; - private ExecutorService _writebackExecutor; - private ExecutorService _commitExecutor; - private ExecutorService _statusExecutor; - private volatile boolean _ready = false; - - void init(@Observes @Priority(110) StartupEvent event) { - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("tx-writeback-%d") - .build(); - - _writebackExecutor = Executors.newSingleThreadExecutor(factory); - _writebackExecutor.submit(this::writeback); - } - - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("writeback-commit-%d") - .build(); - - _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } - _statusExecutor = Executors.newSingleThreadExecutor(); - _statusExecutor.submit(() -> { - try { - while (true) { - Thread.sleep(1000); - if (currentSize > 0) - Log.info("Tx commit status: size=" - + currentSize / 1024 / 1024 + "MB"); - } - } catch (InterruptedException ignored) { - } - }); - _ready = true; - } - - void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException { - Log.info("Waiting for all transactions to drain"); - - synchronized (_flushWaitSynchronizer) { - _ready = false; - while (currentSize > 0) { - _flushWaitSynchronizer.wait(); 
- } - } - - _writebackExecutor.shutdownNow(); - Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms"); - } - - private void verifyReady() { - if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!"); - } - - private void writeback() { - while (!Thread.interrupted()) { - try { - TxBundle bundle = new TxBundle(0); - synchronized (_pendingBundles) { - while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready) - _pendingBundles.wait(); - - long diff = 0; - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - bundle.compress(toCompress); - } - diff += bundle.calculateTotalSize(); - synchronized (_flushWaitSynchronizer) { - currentSize += diff; - } - } - - var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size()); - ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); - - for (var c : bundle._committed.values()) { - _commitExecutor.execute(() -> { - try { - Log.trace("Writing new " + c.newMeta.getName()); - objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData); - } catch (Throwable t) { - Log.error("Error writing " + c.newMeta.getName(), t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - for (var c : bundle._meta.values()) { - _commitExecutor.execute(() -> { - try { - Log.trace("Writing (meta) " + c.newMeta.getName()); - objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta); - } catch (Throwable t) { - Log.error("Error writing " + c.newMeta.getName(), t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - if (Log.isDebugEnabled()) - for (var d : bundle._deleted.keySet()) - Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests - - latch.await(); - if (!errors.isEmpty()) { - throw new RuntimeException("Errors in writeback!"); - } - objectPersistentStore.commitTx( - new TxManifest( - Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()), - bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)), - bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new)) - )); - Log.trace("Bundle " + bundle.getId() + " committed"); - - - List> callbacks = new ArrayList<>(); - synchronized (_notFlushedBundles) { - _lastWrittenTx.set(bundle.getId()); - while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) { - callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted()); - } - } - callbacks.forEach(l -> l.forEach(VoidFn::apply)); - - synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundle) bundle).calculateTotalSize(); - // FIXME: - if (currentSize <= sizeLimit || !_ready) - _flushWaitSynchronizer.notifyAll(); - } - } catch (InterruptedException ignored) { - } catch (Exception e) { - Log.error("Uncaught exception in writeback", e); - } catch (Throwable o) { - Log.error("Uncaught THROWABLE in writeback", o); - } - } - Log.info("Writeback thread exiting"); - } - - @Override - public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() { - verifyReady(); - boolean wait = false; - while (true) { - if (wait) { - synchronized (_flushWaitSynchronizer) { - long started = System.currentTimeMillis(); - while (currentSize > sizeLimit) { - try { - _flushWaitSynchronizer.wait(); - } catch 
(InterruptedException e) { - throw new RuntimeException(e); - } - } - long waited = System.currentTimeMillis() - started; - _waitedTotal.addAndGet(waited); - if (Log.isTraceEnabled()) - Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms"); - wait = false; - } - } - synchronized (_pendingBundles) { - synchronized (_flushWaitSynchronizer) { - if (currentSize > sizeLimit) { - if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var target = _pendingBundles.poll(); - - long diff = -target.calculateTotalSize(); - while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) { - var toCompress = _pendingBundles.poll(); - diff -= toCompress.calculateTotalSize(); - target.compress(toCompress); - } - diff += target.calculateTotalSize(); - currentSize += diff; - _pendingBundles.addFirst(target); - } - } - - if (currentSize > sizeLimit) { - wait = true; - continue; - } - } - synchronized (_notFlushedBundles) { - var bundle = new TxBundle(_counter.incrementAndGet()); - _pendingBundles.addLast(bundle); - _notFlushedBundles.put(bundle.getId(), bundle); - return bundle; - } - } - } - } - - @Override - public void commitBundle(com.usatiuk.dhfs.objects.jrepository.TxBundle bundle) { - verifyReady(); - synchronized (_pendingBundles) { - ((TxBundle) bundle).setReady(); - if (_pendingBundles.peek() == bundle) - _pendingBundles.notify(); - synchronized (_flushWaitSynchronizer) { - currentSize += ((TxBundle) bundle).calculateTotalSize(); - } - } - } - - @Override - public void dropBundle(com.usatiuk.dhfs.objects.jrepository.TxBundle bundle) { - verifyReady(); - synchronized (_pendingBundles) { - Log.warn("Dropped bundle: " + bundle); - _pendingBundles.remove((TxBundle) bundle); - synchronized (_flushWaitSynchronizer) { - currentSize -= ((TxBundle) bundle).calculateTotalSize(); - } - } - } - - @Override - public void fence(long bundleId) { - var latch = new CountDownLatch(1); - asyncFence(bundleId, latch::countDown); - try { - latch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - @Override - public void asyncFence(long bundleId, VoidFn fn) { - verifyReady(); - if (bundleId < 0) throw new IllegalArgumentException("txId should be >0!"); - if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); - return; - } - synchronized (_notFlushedBundles) { - if (_lastWrittenTx.get() >= bundleId) { - fn.apply(); - return; - } - _notFlushedBundles.get(bundleId).addCallback(fn); - } - } - - @Getter - private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest { - private final ArrayList _written; - private final ArrayList _deleted; - - private TxManifest(ArrayList written, ArrayList deleted) { - _written = written; - _deleted = deleted; - } - } - - private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle { - private final HashMap, CommittedEntry> _committed = new HashMap<>(); - private final HashMap, CommittedMeta> _meta = new HashMap<>(); - private final HashMap, Integer> _deleted = new HashMap<>(); - private final ArrayList _callbacks = new ArrayList<>(); - private long _txId; - @Getter - private volatile boolean _ready = false; - private long _size = -1; - private boolean _wasCommitted = false; - - private TxBundle(long txId) {_txId = txId;} - - @Override - public long getId() { - return _txId; - } - - public void setReady() { - _ready = true; - } - - public void addCallback(VoidFn callback) { - synchronized (_callbacks) { - if (_wasCommitted) throw 
new IllegalStateException(); - _callbacks.add(callback); - } - } - - public List setCommitted() { - synchronized (_callbacks) { - _wasCommitted = true; - return Collections.unmodifiableList(_callbacks); - } - } - - @Override - public void commit(JObject obj, ObjectMetadataP meta, JObjectDataP data) { - synchronized (_committed) { - _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize())); - } - } - - @Override - public void commitMetaChange(JObject obj, ObjectMetadataP meta) { - synchronized (_meta) { - _meta.put(obj, new CommittedMeta(meta, obj.estimateSize())); - } - } - - @Override - public void delete(JObject obj) { - synchronized (_deleted) { - _deleted.put(obj, obj.estimateSize()); - } - } - - - public long calculateTotalSize() { - if (_size >= 0) return _size; - long out = 0; - for (var c : _committed.values()) - out += c.size; - for (var c : _meta.values()) - out += c.size; - for (var c : _deleted.entrySet()) - out += c.getValue(); - _size = out; - return _size; - } - - public void compress(TxBundle other) { - if (_txId >= other._txId) - throw new IllegalArgumentException("Compressing an older bundle into newer"); - - _txId = other._txId; - _size = -1; - - for (var d : other._deleted.entrySet()) { - _committed.remove(d.getKey()); - _meta.remove(d.getKey()); - _deleted.put(d.getKey(), d.getValue()); - } - - for (var c : other._committed.entrySet()) { - _committed.put(c.getKey(), c.getValue()); - _meta.remove(c.getKey()); - _deleted.remove(c.getKey()); - } - - for (var m : other._meta.entrySet()) { - var deleted = _deleted.remove(m.getKey()); - if (deleted != null) { - _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize())); - continue; - } - var committed = _committed.remove(m.getKey()); - if (committed != null) { - _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize())); - continue; - } - _meta.put(m.getKey(), m.getValue()); - } - } - - private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {} - - private record CommittedMeta(ObjectMetadataP newMeta, int size) {} - - private record Deleted(JObject handle) {} - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java deleted file mode 100644 index fcb5a07e..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/CertificateTools.java +++ /dev/null @@ -1,63 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import org.apache.commons.codec.digest.DigestUtils; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.BasicConstraints; -import org.bouncycastle.cert.CertIOException; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; - -import java.io.ByteArrayInputStream; -import java.io.InputStream; -import java.math.BigInteger; -import java.security.*; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; -import java.security.cert.X509Certificate; -import 
java.util.Calendar; -import java.util.Date; - -public class CertificateTools { - - public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException { - CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - InputStream in = new ByteArrayInputStream(bytes); - return (X509Certificate) certFactory.generateCertificate(in); - } - - public static KeyPair generateKeyPair() throws NoSuchAlgorithmException { - KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); - keyGen.initialize(2048); //FIXME: - return keyGen.generateKeyPair(); - } - - public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException { - Provider bcProvider = new BouncyCastleProvider(); - Security.addProvider(bcProvider); - - Date startDate = new Date(); - - X500Name cnName = new X500Name("CN=" + subject); - BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject)); - - Calendar calendar = Calendar.getInstance(); - calendar.setTime(startDate); - calendar.add(Calendar.YEAR, 999); - - Date endDate = calendar.getTime(); - - ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate()); - - JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic()); - - BasicConstraints basicConstraints = new BasicConstraints(false); - certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints); - - return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner)); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java deleted file mode 100644 index 1f4391cc..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/ConflictResolver.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; - -import java.util.UUID; - -public interface ConflictResolver { - void resolve(UUID conflictHost, ObjectHeader conflictHeader, JObjectData conflictData, JObject conflictSource); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java deleted file mode 100644 index 29a53d88..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PeerManager.java +++ /dev/null @@ -1,277 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.dhfs.objects.repository.peersync.PeerSyncApiClientDynamic; -import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import com.usatiuk.dhfs.objects.repository.webapi.AvailablePeerInfo; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import io.quarkus.scheduler.Scheduled; -import io.smallrye.common.annotation.Blocking; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import lombok.Getter; -import org.eclipse.microprofile.config.inject.ConfigProperty; - 
-import java.io.IOException; -import java.security.cert.CertificateException; -import java.util.*; -import java.util.concurrent.*; - -@ApplicationScoped -public class PeerManager { - private final TransientPeersState _transientPeersState = new TransientPeersState(); - private final ConcurrentMap _seenButNotAdded = new ConcurrentHashMap<>(); - // FIXME: Ideally not call them on every ping - private final ArrayList _connectedListeners = new ArrayList<>(); - private final ArrayList _disconnectedListeners = new ArrayList<>(); - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - SyncHandler syncHandler; - @Inject - RpcClientFactory rpcClientFactory; - @Inject - PeerSyncApiClientDynamic peerSyncApiClient; - @ConfigProperty(name = "dhfs.objects.sync.ping.timeout") - long pingTimeout; - private ExecutorService _heartbeatExecutor; - @Getter - private boolean _ready = false; - - // Note: keep priority updated with below - void init(@Observes @Priority(600) StartupEvent event) throws IOException { - _heartbeatExecutor = Executors.newVirtualThreadPerTaskExecutor(); - - // Note: newly added hosts aren't in _transientPeersState - // but that's ok as they don't have initialSyncDone set - for (var h : persistentPeerDataService.getHostUuids()) - _transientPeersState.runWriteLocked(d -> d.get(h)); - - _ready = true; - } - - void shutdown(@Observes @Priority(50) ShutdownEvent event) throws IOException { - _ready = false; - } - - @Scheduled(every = "${dhfs.objects.reconnect_interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP) - @Blocking - public void tryConnectAll() { - if (!_ready) return; - try { - _heartbeatExecutor.invokeAll(persistentPeerDataService.getHostUuids() - .stream() - .>map(host -> () -> { - try { - if (isReachable(host)) - Log.trace("Heartbeat: " + host); - else - Log.debug("Trying to connect to " + host); - if (pingCheck(host)) - handleConnectionSuccess(host); - else - handleConnectionError(host); - } catch (Exception e) { - Log.error("Failed to connect to " + host, e); - } - return null; - }).toList(), 30, TimeUnit.SECONDS); //FIXME: - } catch (InterruptedException iex) { - Log.error("Heartbeat was interrupted"); - } - } - - // Note: registrations should be completed with Priority < 600 - public void registerConnectEventListener(ConnectionEventListener listener) { - if (_ready) throw new IllegalStateException("Already initialized"); - synchronized (_connectedListeners) { - _connectedListeners.add(listener); - } - } - - // Note: registrations should be completed with Priority < 600 - public void registerDisconnectEventListener(ConnectionEventListener listener) { - if (_ready) throw new IllegalStateException("Already initialized"); - synchronized (_disconnectedListeners) { - _disconnectedListeners.add(listener); - } - } - - public void handleConnectionSuccess(UUID host) { - if (!_ready) return; - - boolean wasReachable = isReachable(host); - - boolean shouldSyncObj = persistentPeerDataService.markInitialObjSyncDone(host); - boolean shouldSyncOp = persistentPeerDataService.markInitialOpSyncDone(host); - - if (shouldSyncObj) - syncHandler.pushInitialResyncObj(host); - if (shouldSyncOp) - syncHandler.pushInitialResyncOp(host); - - _transientPeersState.runWriteLocked(d -> { - d.get(host).setReachable(true); - return null; - }); - - if (wasReachable) return; - - Log.info("Connected to " + host); - - for (var l : _connectedListeners) { - l.apply(host); - } - } - - public void handleConnectionError(UUID host) { - boolean wasReachable = isReachable(host); - - 
if (wasReachable) - Log.info("Lost connection to " + host); - - _transientPeersState.runWriteLocked(d -> { - d.get(host).setReachable(false); - return null; - }); - - for (var l : _disconnectedListeners) { - l.apply(host); - } - } - - // FIXME: - private boolean pingCheck(UUID host) { - TransientPeerState state = _transientPeersState.runReadLocked(s -> s.getCopy(host)); - - try { - return rpcClientFactory.withObjSyncClient(host.toString(), state.getAddr(), state.getSecurePort(), pingTimeout, c -> { - var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); - if (!UUID.fromString(ret.getSelfUuid()).equals(host)) { - throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host); - } - return true; - }); - } catch (Exception ignored) { - Log.debug("Host " + host + " is unreachable: " + ignored.getMessage() + " " + ignored.getCause()); - return false; - } - } - - public boolean isReachable(UUID host) { - return _transientPeersState.runReadLocked(d -> d.get(host).isReachable()); - } - - public TransientPeerState getTransientState(UUID host) { - return _transientPeersState.runReadLocked(d -> d.getCopy(host)); - } - - public List getAvailableHosts() { - return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() - .filter(e -> e.getValue().isReachable()) - .map(Map.Entry::getKey).toList()); - } - - public List getUnavailableHosts() { - return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream() - .filter(e -> !e.getValue().isReachable()) - .map(Map.Entry::getKey).toList()); - } - - public HostStateSnapshot getHostStateSnapshot() { - ArrayList available = new ArrayList<>(); - ArrayList unavailable = new ArrayList<>(); - _transientPeersState.runReadLocked(d -> { - for (var v : d.getStates().entrySet()) { - if (v.getValue().isReachable()) - available.add(v.getKey()); - else - unavailable.add(v.getKey()); - } - return null; - } - ); - return new HostStateSnapshot(available, unavailable); - } - - public void notifyAddr(UUID host, String addr, Integer port, Integer securePort) { - if (host.equals(persistentPeerDataService.getSelfUuid())) { - return; - } - - var state = new TransientPeerState(); - state.setAddr(addr); - state.setPort(port); - state.setSecurePort(securePort); - - if (!persistentPeerDataService.existsHost(host)) { - var prev = _seenButNotAdded.put(host, state); - // Needed for tests - if (prev == null) - Log.debug("Ignoring new address from unknown host " + ": addr=" + addr + " port=" + port); - return; - } else { - _seenButNotAdded.remove(host); - } - - _transientPeersState.runWriteLocked(d -> { -// Log.trace("Updating connection info for " + host + ": addr=" + addr + " port=" + port); - d.get(host).setAddr(addr); - d.get(host).setPort(port); - d.get(host).setSecurePort(securePort); - return null; - }); - } - - public void removeRemoteHost(UUID host) { - persistentPeerDataService.removeHost(host); - // Race? - _transientPeersState.runWriteLocked(d -> { - d.getStates().remove(host); - return null; - }); - } - - public void addRemoteHost(UUID host) { - if (!_seenButNotAdded.containsKey(host)) { - throw new IllegalStateException("Host " + host + " is not seen"); - } - if (persistentPeerDataService.existsHost(host)) { - throw new IllegalStateException("Host " + host + " is already added"); - } - - var state = _seenButNotAdded.get(host); - - // FIXME: race? 
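Peer addition as deleted here is deliberately two-phase: an address first arrives through notifyAddr() and is parked in _seenButNotAdded, and only a later explicit addRemoteHost() call fetches the peer's self-info over its plain API port and pins its certificate. A hypothetical adoption sketch; AvailablePeerInfo is assumed to be a record with a uuid() accessor matching its constructor arguments above:

```java
// Hypothetical sketch against the deleted PeerManager API.
// Assumed: AvailablePeerInfo is a record (uuid, addr, port) with a uuid() accessor.
import java.util.UUID;

class PeerAdoptionSketch {
    void adoptIfSeen(PeerManager peerManager, UUID host) {
        for (var candidate : peerManager.getSeenButNotAddedHosts()) {
            if (candidate.uuid().equals(host.toString())) {
                // Throws IllegalStateException if the host was never seen or is
                // already added; otherwise downloads its self-info and pins the cert.
                peerManager.addRemoteHost(host);
                return;
            }
        }
    }
}
```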
- - var info = peerSyncApiClient.getSelfInfo(state.getAddr(), state.getPort()); - - try { - persistentPeerDataService.addHost( - new PersistentPeerInfo(UUID.fromString(info.selfUuid()), - CertificateTools.certFromBytes(Base64.getDecoder().decode(info.cert())))); - Log.info("Added host: " + host.toString()); - } catch (CertificateException e) { - throw new RuntimeException(e); - } - } - - public Collection getSeenButNotAddedHosts() { - return _seenButNotAdded.entrySet().stream() - .filter(e -> !persistentPeerDataService.existsHost(e.getKey())) - .map(e -> new AvailablePeerInfo(e.getKey().toString(), e.getValue().getAddr(), e.getValue().getPort())) - .toList(); - } - - @FunctionalInterface - public interface ConnectionEventListener { - void apply(UUID host); - } - - public record HostStateSnapshot(List available, List unavailable) { - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java deleted file mode 100644 index 0413d8b8..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentPeerDataService.java +++ /dev/null @@ -1,361 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.dhfs.ShutdownChecker; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; -import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectoryLocal; -import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Nullable; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.SerializationUtils; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.ExecutorService; - -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -@ApplicationScoped -public class PersistentPeerDataService { - final String dataFileName = "hosts"; - @ConfigProperty(name = "dhfs.objects.root") - String dataRoot; - @Inject - PeerTrustManager peerTrustManager; - @Inject - JObjectManager jObjectManager; - @Inject - ExecutorService executorService; - @Inject - InvalidationQueueService invalidationQueueService; - @Inject - RpcClientFactory rpcClientFactory; - @Inject - ShutdownChecker shutdownChecker; - @Inject - JObjectTxManager jObjectTxManager; - @Inject - SoftJObjectFactory softJObjectFactory; - SoftJObject peerDirectory; - SoftJObject peerDirectoryLocal; - private PersistentRemoteHosts _persistentData = new PersistentRemoteHosts(); - private UUID _selfUuid; - - void init(@Observes @Priority(300) StartupEvent event) throws IOException { - Paths.get(dataRoot).toFile().mkdirs(); - 
Log.info("Initializing with root " + dataRoot); - if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) { - Log.info("Reading hosts"); - _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); - } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) { - Log.warn("Reading hosts from backup"); - _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName))); - } - _selfUuid = _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfUuid); - - if (_persistentData.runReadLocked(d -> d.getSelfCertificate() == null)) { - jObjectTxManager.executeTxAndFlush(() -> { - _persistentData.runWriteLocked(d -> { - try { - Log.info("Generating a key pair, please wait"); - d.setSelfKeyPair(CertificateTools.generateKeyPair()); - d.setSelfCertificate(CertificateTools.generateCertificate(d.getSelfKeyPair(), _selfUuid.toString())); - } catch (Exception e) { - throw new RuntimeException("Failed generating cert", e); - } - return null; - }); - var newpd = new PeerDirectory(); - jObjectManager.put(new PersistentPeerInfo(_selfUuid, getSelfCertificate()), Optional.of(PeerDirectory.PeerDirectoryObjName)); - newpd.getPeers().add(_selfUuid); - jObjectManager.put(newpd, Optional.empty()); - jObjectManager.put(new PeerDirectoryLocal(), Optional.empty()); - }); - } - - peerDirectory = softJObjectFactory.create(PeerDirectory.class, PeerDirectory.PeerDirectoryObjName); - peerDirectoryLocal = softJObjectFactory.create(PeerDirectoryLocal.class, PeerDirectoryLocal.PeerDirectoryLocalObjName); - - if (!shutdownChecker.lastShutdownClean()) { - _persistentData.getData().getIrregularShutdownCounter().addAndGet(1); - jObjectTxManager.executeTxAndFlush(() -> { - peerDirectoryLocal.get().rwLock(); - peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - try { - peerDirectoryLocal.get().getData().getInitialObjSyncDone().clear(); - peerDirectoryLocal.get().bumpVer(); - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - }); - } - - jObjectManager.registerWriteListener(PersistentPeerInfo.class, this::pushPeerUpdates); - jObjectManager.registerWriteListener(PeerDirectory.class, this::pushPeerUpdates); - - // FIXME: Warn on failed resolves? 
- peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - peerTrustManager.reloadTrustManagerHosts(getHosts()); - return null; - }); - - Files.writeString(Paths.get(dataRoot, "self_uuid"), _selfUuid.toString()); - Log.info("Self uuid is: " + _selfUuid.toString()); - writeData(); - } - - void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException { - Log.info("Saving hosts"); - writeData(); - Log.info("Shutdown"); - } - - private void writeData() { - try { - if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) - Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING); - Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData)); - } catch (IOException iex) { - Log.error("Error writing persistent hosts data", iex); - throw new RuntimeException(iex); - } - } - - private void pushPeerUpdates() { - pushPeerUpdates(null); - } - - private void pushPeerUpdates(@Nullable JObject obj) { - if (obj != null) - Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated"); - executorService.submit(() -> { - updateCerts(); - invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName); - for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList())) - invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p)); - }); - } - - private JObject getPeer(UUID uuid) { - var got = jObjectManager.get(PersistentPeerInfo.getNameFromUuid(uuid)).orElseThrow(() -> new IllegalStateException("Peer " + uuid + " not found")); - got.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d == null) throw new IllegalStateException("Could not resolve peer " + uuid); - if (!(d instanceof PersistentPeerInfo)) - throw new IllegalStateException("Peer " + uuid + " is of wrong type!"); - return null; - }); - return (JObject) got; - } - - private List getPeersSnapshot() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeers().stream().map(u -> { - try { - return getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2); - } catch (Exception e) { - Log.warn("Error making snapshot of peer " + u, e); - return null; - } - }).filter(Objects::nonNull).toList()); - } - - public UUID getSelfUuid() { - if (_selfUuid == null) - throw new IllegalStateException(); - else return _selfUuid; - } - - public String getUniqueId() { - String sb = String.valueOf(_selfUuid) + - _persistentData.getData().getIrregularShutdownCounter() + - "_" + - _persistentData.getData().getSelfCounter().addAndGet(1); - return sb; - } - - public PersistentPeerInfo getInfo(UUID name) { - return getPeer(name).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); - } - - public List getHosts() { - return getPeersSnapshot().stream().filter(i -> !i.getUuid().equals(_selfUuid)).toList(); - } - - public List getHostUuids() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().filter(i -> !i.equals(_selfUuid)).toList()); - } - - public List getHostUuidsAndSelf() { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()); - } - - public List getHostsNoNulls() { - for (int i = 0; i < 5; 
i++) { - try { - return peerDirectory.get() - .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> d.getPeers().stream() - .map(u -> getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2)) - .filter(e -> !e.getUuid().equals(_selfUuid)).toList()); - } catch (Exception e) { - Log.warn("Error when making snapshot of hosts: " + e.getMessage()); - try { - Thread.sleep(i * 2); - } catch (InterruptedException ignored) { - } - } - } - throw new StatusRuntimeException(Status.ABORTED.withDescription("Could not make a snapshot of peers in 5 tries!")); - } - - public boolean addHost(PersistentPeerInfo persistentPeerInfo) { - return jObjectTxManager.executeTx(() -> { - if (persistentPeerInfo.getUuid().equals(_selfUuid)) return false; - - boolean added = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - boolean addedInner = d.getPeers().add(persistentPeerInfo.getUuid()); - if (addedInner) { - jObjectManager.put(persistentPeerInfo, Optional.of(m.getName())); - b.apply(); - } - return addedInner; - }); - return added; - }); - } - - public boolean removeHost(UUID host) { - return jObjectTxManager.executeTx(() -> { - boolean removed = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> { - boolean removedInner = d.getPeers().remove(host); - Log.info("Removing host: " + host + (removedInner ? " removed" : " did not exists")); - if (removedInner) { - peerDirectoryLocal.get().rwLock(); - peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - try { - peerDirectoryLocal.get().getData().getInitialObjSyncDone().remove(host); - peerDirectoryLocal.get().getData().getInitialOpSyncDone().remove(host); - peerDirectoryLocal.get().bumpVer(); - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - getPeer(host).runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mp, dp, bp, vp) -> { - mp.removeRef(m.getName()); - return null; - }); - b.apply(); - } - return removedInner; - }); - return removed; - }); - } - - private void updateCerts() { - try { - peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls()); - // Fixme:? I don't think it should be needed with custom trust store - // but it doesn't work? 
- rpcClientFactory.dropCache(); - return null; - }); - } catch (Exception ex) { - Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage()); - pushPeerUpdates(); - } - } - - public boolean existsHost(UUID uuid) { - return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().contains(uuid)); - } - - public PersistentPeerInfo getHost(UUID uuid) { - if (!existsHost(uuid)) - throw new StatusRuntimeException(Status.NOT_FOUND); - return getPeer(uuid).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d); - } - - public KeyPair getSelfKeypair() { - return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfKeyPair); - } - - public X509Certificate getSelfCertificate() { - return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfCertificate); - } - - // Returns true if host's initial sync wasn't done before, and marks it as done - public boolean markInitialOpSyncDone(UUID connectedHost) { - return jObjectTxManager.executeTx(() -> { - peerDirectoryLocal.get().rwLock(); - try { - peerDirectoryLocal.get().local(); - boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost); - - if (!contained) - peerDirectoryLocal.get().local().mutate(new JMutator() { - @Override - public boolean mutate(PeerDirectoryLocal object) { - object.getInitialOpSyncDone().add(connectedHost); - return true; - } - - @Override - public void revert(PeerDirectoryLocal object) { - object.getInitialOpSyncDone().remove(connectedHost); - } - }); - return !contained; - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - }); - } - - public boolean markInitialObjSyncDone(UUID connectedHost) { - return jObjectTxManager.executeTx(() -> { - peerDirectoryLocal.get().rwLock(); - try { - peerDirectoryLocal.get().local(); - boolean contained = peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost); - - if (!contained) - peerDirectoryLocal.get().local().mutate(new JMutator() { - @Override - public boolean mutate(PeerDirectoryLocal object) { - object.getInitialObjSyncDone().add(connectedHost); - return true; - } - - @Override - public void revert(PeerDirectoryLocal object) { - object.getInitialObjSyncDone().remove(connectedHost); - } - }); - return !contained; - } finally { - peerDirectoryLocal.get().rwUnlock(); - } - }); - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java deleted file mode 100644 index c4e93c8a..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHosts.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import lombok.Getter; - -import java.io.Serial; -import java.io.Serializable; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -public class PersistentRemoteHosts implements Serializable { - @Serial - private static final long serialVersionUID = 1; - - @Getter - private final PersistentRemoteHostsData _data = new PersistentRemoteHostsData(); - private final ReadWriteLock _lock = new ReentrantReadWriteLock(); - - public R runReadLocked(PersistentRemoteHostsFn fn) { - _lock.readLock().lock(); - try { - return fn.apply(_data); - } finally { - _lock.readLock().unlock(); - } - } - - public R runWriteLocked(PersistentRemoteHostsFn fn) 
{ - _lock.writeLock().lock(); - try { - return fn.apply(_data); - } finally { - _lock.writeLock().unlock(); - } - } - - @FunctionalInterface - public interface PersistentRemoteHostsFn { - R apply(PersistentRemoteHostsData hostsData); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java deleted file mode 100644 index a6b0c8f3..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/PersistentRemoteHostsData.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import lombok.Getter; -import lombok.Setter; - -import java.io.Serial; -import java.io.Serializable; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -public class PersistentRemoteHostsData implements Serializable { - @Serial - private static final long serialVersionUID = 1L; - - @Getter - private final UUID _selfUuid = UUID.randomUUID(); - @Getter - private final AtomicLong _selfCounter = new AtomicLong(); - @Getter - private final AtomicLong _irregularShutdownCounter = new AtomicLong(); - @Getter - @Setter - private X509Certificate _selfCertificate = null; - @Getter - @Setter - private KeyPair _selfKeyPair = null; -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java deleted file mode 100644 index a9a277c4..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceClient.java +++ /dev/null @@ -1,174 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.google.common.collect.Maps; -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.opsupport.Op; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.tuple.Pair; - -import javax.annotation.Nullable; -import java.util.*; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.Executors; -import java.util.stream.Collectors; - -@ApplicationScoped -public class RemoteObjectServiceClient { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Inject - RpcClientFactory rpcClientFactory; - - @Inject - JObjectManager jObjectManager; - - @Inject - SyncHandler syncHandler; - @Inject - InvalidationQueueService invalidationQueueService; - @Inject - ProtoSerializer dataProtoSerializer; - @Inject - ProtoSerializer opProtoSerializer; - @Inject - JObjectTxManager jObjectTxManager; - - public Pair getSpecificObject(UUID host, String name) { - return rpcClientFactory.withObjSyncClient(host, client -> { - var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build()); - return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent()); - }); - } - - public JObjectDataP 
getObject(JObject jObject) { - jObject.assertRwLock(); - - var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> { - var ourVersion = md.getOurVersion(); - if (ourVersion >= 1) - return md.getRemoteCopies().entrySet().stream() - .filter(entry -> entry.getValue().equals(ourVersion)) - .map(Map.Entry::getKey).toList(); - else - return persistentPeerDataService.getHostUuids(); - }); - - if (targets.isEmpty()) - throw new IllegalStateException("No targets for object " + jObject.getMeta().getName()); - - Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", "))); - - return rpcClientFactory.withObjSyncClient(targets, client -> { - var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build()); - - var receivedMap = new HashMap(); - for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) { - receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); - } - - return jObjectTxManager.executeTx(() -> { - return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> { - var unexpected = !Objects.equals( - Maps.filterValues(md.getChangelog(), val -> val != 0), - Maps.filterValues(receivedMap, val -> val != 0)); - - if (unexpected) { - try { - syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader()); - } catch (SyncHandler.OutdatedUpdateException ignored) { - Log.info("Outdated update of " + md.getName() + " from " + reply.getSelfUuid()); - invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True? - throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version")); - } catch (Exception e) { - Log.error("Received unexpected object version from " + reply.getSelfUuid() - + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e); - throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version")); - } - } - - return reply.getObject().getContent(); - }); - }); - }); - } - - @Nullable - public IndexUpdateReply notifyUpdate(JObject obj, UUID host) { - var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); - - var header = obj - .runReadLocked( - obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class) - ? 
JObjectManager.ResolutionStrategy.LOCAL_ONLY
-                                : JObjectManager.ResolutionStrategy.NO_RESOLUTION,
-                        (m, d) -> {
-                            if (obj.getMeta().isDeleted()) return null;
-                            if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null)
-                                Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found");
-                            if (m.getKnownClass().isAnnotationPresent(PushResolution.class))
-                                return m.toRpcHeader(dataProtoSerializer.serialize(d));
-                            else
-                                return m.toRpcHeader();
-                        });
-        if (header == null) return null;
-        jObjectTxManager.executeTx(obj::markSeen);
-        builder.setHeader(header);
-
-        var send = builder.build();
-
-        return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send));
-    }
-
-    public OpPushReply pushOps(List<Op> ops, String queueName, UUID host) {
-        for (Op op : ops) {
-            for (var ref : op.getEscapedRefs()) {
-                jObjectTxManager.executeTx(() -> {
-                    jObjectManager.get(ref).ifPresent(JObject::markSeen);
-                });
-            }
-        }
-        var builder = OpPushMsg.newBuilder()
-                .setSelfUuid(persistentPeerDataService.getSelfUuid().toString())
-                .setQueueId(queueName);
-        for (var op : ops)
-            builder.addMsg(opProtoSerializer.serialize(op));
-        return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build()));
-    }
-
-    public Collection<CanDeleteReply> canDelete(Collection<UUID> targets, String object, Collection<String> ourReferrers) {
-        ConcurrentLinkedDeque<CanDeleteReply> results = new ConcurrentLinkedDeque<>();
-        Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", ")));
-        try (var executor = Executors.newVirtualThreadPerTaskExecutor()) {
-            try {
-                executor.invokeAll(targets.stream().<Callable<Void>>map(h -> () -> {
-                    try {
-                        var req = CanDeleteRequest.newBuilder()
-                                .setSelfUuid(persistentPeerDataService.getSelfUuid().toString())
-                                .setName(object);
-                        req.addAllOurReferrers(ourReferrers);
-                        var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build()));
-                        if (res != null)
-                            results.add(res);
-                    } catch (Exception e) {
-                        Log.debug("Error when asking canDelete for object " + object, e);
-                    }
-                    return null;
-                }).toList());
-            } catch (InterruptedException e) {
-                Log.warn("Interrupted waiting for canDelete for object " + object);
-            }
-            if (!executor.shutdownNow().isEmpty())
-                Log.warn("Didn't ask all targets when asking canDelete for " + object);
-        }
-        return results;
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java
deleted file mode 100644
index 17b9bb22..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RemoteObjectServiceServer.java
+++ /dev/null
@@ -1,184 +0,0 @@
-package com.usatiuk.dhfs.objects.repository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor;
-import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
-import
com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.grpc.GrpcService; -import io.quarkus.logging.Log; -import io.smallrye.common.annotation.Blocking; -import io.smallrye.mutiny.Uni; -import jakarta.annotation.security.RolesAllowed; -import jakarta.inject.Inject; - -import java.util.UUID; - -// Note: RunOnVirtualThread hangs somehow -@GrpcService -@RolesAllowed("cluster-member") -public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc { - @Inject - SyncHandler syncHandler; - - @Inject - JObjectManager jObjectManager; - - @Inject - PeerManager remoteHostManager; - - @Inject - AutoSyncProcessor autoSyncProcessor; - - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Inject - InvalidationQueueService invalidationQueueService; - - @Inject - ProtoSerializer dataProtoSerializer; - @Inject - ProtoSerializer opProtoSerializer; - - @Inject - OpObjectRegistry opObjectRegistry; - - @Inject - JObjectTxManager jObjectTxManager; - - @Override - @Blocking - public Uni getObject(GetObjectRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - - Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid()); - - var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND)); - - // Does @Blocking break this? - return Uni.createFrom().emitter(emitter -> { - var replyObj = jObjectTxManager.executeTx(() -> { - // Obj.markSeen before markSeen of its children - obj.markSeen(); - return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> { - if (meta.isOnlyLocal()) - throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object")); - if (data == null) { - Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid()); - throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally")); - } - data.extractRefs().forEach(ref -> - jObjectManager.get(ref) - .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?")) - .markSeen()); - - return ApiObject.newBuilder() - .setHeader(obj.getMeta().toRpcHeader()) - .setContent(dataProtoSerializer.serialize(obj.getData())).build(); - }); - }); - var ret = GetObjectReply.newBuilder() - .setSelfUuid(persistentPeerDataService.getSelfUuid().toString()) - .setObject(replyObj).build(); - // TODO: Could this cause problems if we wait for too long? 
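On the TODO above: the emitter is completed only from the `commitFenceAsync` callback that follows, i.e. once the transaction is durable, so a stalled commit keeps the gRPC call pending indefinitely. If that wait ever needs bounding, Mutiny's timeout combinators can wrap the returned Uni; a sketch under that assumption (the helper and the deadline value are not from the original code):

```java
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.smallrye.mutiny.Uni;

import java.time.Duration;

final class ReplyGuards {
    // Fail the pending reply instead of letting the caller hang forever.
    static <T> Uni<T> withDeadline(Uni<T> reply, Duration limit) {
        return reply.ifNoItem().after(limit)
                .failWith(() -> new StatusRuntimeException(
                        Status.DEADLINE_EXCEEDED.withDescription("Reply timed out")));
    }
}
```

The server would then return something like `ReplyGuards.withDeadline(replyUni, Duration.ofSeconds(30))` instead of the bare emitter Uni.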
- obj.commitFenceAsync(() -> emitter.complete(ret)); - }); - } - - @Override - @Blocking - public Uni canDelete(CanDeleteRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - - Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid()); - - var builder = CanDeleteReply.newBuilder(); - - var obj = jObjectManager.get(request.getName()); - - builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString()); - builder.setObjName(request.getName()); - - if (obj.isPresent()) try { - boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> { - if (m.isDeleted() && !m.isDeletionCandidate()) - throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate"); - builder.setDeletionCandidate(m.isDeletionCandidate()); - builder.addAllReferrers(m.getReferrers()); - return m.isDeletionCandidate() && !m.isDeleted(); - }); - // FIXME -// if (tryUpdate) { -// obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> { -// return null; -// }); -// } - } catch (DeletedObjectAccessException dox) { - builder.setDeletionCandidate(true); - } - else { - builder.setDeletionCandidate(true); - } - - var ret = builder.build(); - - if (!ret.getDeletionCandidate()) - for (var rr : request.getOurReferrersList()) - autoSyncProcessor.add(rr); - - return Uni.createFrom().item(ret); - } - - @Override - @Blocking - public Uni indexUpdate(IndexUpdatePush request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - -// Log.info("<-- indexUpdate: " + request.getHeader().getName()); - return jObjectTxManager.executeTxAndFlush(() -> { - return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request)); - }); - } - - @Override - @Blocking - public Uni opPush(OpPushMsg request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid()))) - throw new StatusRuntimeException(Status.UNAUTHENTICATED); - - try { - var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList(); - jObjectTxManager.executeTxAndFlush(() -> { - opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs); - }); - } catch (Exception e) { - Log.error(e, e); - throw e; - } - return Uni.createFrom().item(OpPushReply.getDefaultInstance()); - } - - @Override - @Blocking - public Uni ping(PingRequest request) { - if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT); - - return Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build()); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java deleted file mode 100644 index 3239ec7d..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcChannelFactory.java +++ /dev/null @@ -1,69 +0,0 @@ -package 
com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager; -import io.grpc.ChannelCredentials; -import io.grpc.ManagedChannel; -import io.grpc.TlsChannelCredentials; -import io.grpc.netty.NettyChannelBuilder; -import io.quarkus.runtime.ShutdownEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; - -import javax.net.ssl.KeyManagerFactory; -import java.security.KeyStore; -import java.security.cert.Certificate; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -//FIXME: Leaks! -@ApplicationScoped -public class RpcChannelFactory { - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - PeerTrustManager peerTrustManager; - private ConcurrentMap _secureChannelCache = new ConcurrentHashMap<>(); - - void shutdown(@Observes @Priority(100000) ShutdownEvent event) { - for (var c : _secureChannelCache.values()) c.shutdownNow(); - } - - private ChannelCredentials getChannelCredentials() { - try { - KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(null, null); - - ks.setKeyEntry("clientkey", persistentPeerDataService.getSelfKeypair().getPrivate(), null, new Certificate[]{persistentPeerDataService.getSelfCertificate()}); - - KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - keyManagerFactory.init(ks, null); - - ChannelCredentials creds = TlsChannelCredentials.newBuilder().trustManager(peerTrustManager).keyManager(keyManagerFactory.getKeyManagers()).build(); - return creds; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - ManagedChannel getSecureChannel(String host, String address, int port) { - var key = new SecureChannelKey(host, address, port); - return _secureChannelCache.computeIfAbsent(key, (k) -> { - return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host).idleTimeout(10, TimeUnit.SECONDS).build(); - }); - } - - public void dropCache() { - var oldS = _secureChannelCache; - _secureChannelCache = new ConcurrentHashMap<>(); - oldS.values().forEach(ManagedChannel::shutdown); - } - - private record SecureChannelKey(String host, String address, int port) { - } - - private record InsecureChannelKey(String address, int port) { - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java deleted file mode 100644 index aff24f85..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/RpcClientFactory.java +++ /dev/null @@ -1,88 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -// TODO: Dedup this -@ApplicationScoped -public class RpcClientFactory { - @ConfigProperty(name = "dhfs.objects.sync.timeout") - long 
syncTimeout; - - @Inject - PeerManager remoteHostManager; - - @Inject - RpcChannelFactory rpcChannelFactory; - // FIXME: Leaks! - private ConcurrentMap _objSyncCache = new ConcurrentHashMap<>(); - - public R withObjSyncClient(Collection targets, ObjectSyncClientFunction fn) { - var shuffledList = new ArrayList<>(targets); - Collections.shuffle(shuffledList); - for (UUID target : shuffledList) { - try { - return withObjSyncClient(target, fn); - } catch (StatusRuntimeException e) { - if (e.getStatus().getCode().equals(Status.UNAVAILABLE.getCode())) - Log.debug("Host " + target + " is unreachable: " + e.getMessage()); - else - Log.warn("When calling " + target + " " + e.getMessage()); - } catch (Exception e) { - Log.warn("When calling " + target + " " + e.getMessage()); - } - } - throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("No reachable targets!")); - } - - public R withObjSyncClient(UUID target, ObjectSyncClientFunction fn) { - var hostinfo = remoteHostManager.getTransientState(target); - boolean reachable = remoteHostManager.isReachable(target); - - if (hostinfo.getAddr() == null) - throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Address for " + target + " not yet known")); - - if (!reachable) - throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target)); - - return withObjSyncClient(target.toString(), hostinfo.getAddr(), hostinfo.getSecurePort(), syncTimeout, fn); - } - - public R withObjSyncClient(String host, String addr, int port, long timeout, ObjectSyncClientFunction fn) { - var key = new ObjSyncStubKey(host, addr, port); - var stub = _objSyncCache.computeIfAbsent(key, (k) -> { - var channel = rpcChannelFactory.getSecureChannel(host, addr, port); - return DhfsObjectSyncGrpcGrpc.newBlockingStub(channel) - .withMaxOutboundMessageSize(Integer.MAX_VALUE) - .withMaxInboundMessageSize(Integer.MAX_VALUE); - - }); - return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS)); - } - - public void dropCache() { - rpcChannelFactory.dropCache(); - _objSyncCache = new ConcurrentHashMap<>(); - } - - @FunctionalInterface - public interface ObjectSyncClientFunction { - R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client); - } - - private record ObjSyncStubKey(String host, String address, int port) { - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java deleted file mode 100644 index 136041a8..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/SyncHandler.java +++ /dev/null @@ -1,207 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService; -import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry; -import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.inject.Instance; -import jakarta.inject.Inject; - -import 
java.util.HashMap; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -@ApplicationScoped -public class SyncHandler { - @Inject - JObjectManager jObjectManager; - @Inject - PeerManager remoteHostManager; - @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - @Inject - InvalidationQueueService invalidationQueueService; - @Inject - Instance conflictResolvers; - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - ProtoSerializer dataProtoSerializer; - @Inject - OpObjectRegistry opObjectRegistry; - @Inject - JObjectTxManager jObjectTxManager; - - public void pushInitialResyncObj(UUID host) { - Log.info("Doing initial object push for " + host); - - var objs = jObjectManager.findAll(); - - for (var obj : objs) { - Log.trace("IS: " + obj + " to " + host); - invalidationQueueService.pushInvalidationToOne(host, obj); - } - } - - public void pushInitialResyncOp(UUID host) { - Log.info("Doing initial op push for " + host); - - jObjectTxManager.executeTxAndFlush( - () -> { - opObjectRegistry.pushBootstrapData(host); - } - ); - } - - public void handleOneUpdate(UUID from, ObjectHeader header) { - AtomicReference> foundExt = new AtomicReference<>(); - - boolean conflict = jObjectTxManager.executeTx(() -> { - JObject found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty()); - foundExt.set(found); - - var receivedTotalVer = header.getChangelog().getEntriesList() - .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum); - - var receivedMap = new HashMap(); - for (var e : header.getChangelog().getEntriesList()) { - receivedMap.put(UUID.fromString(e.getHost()), e.getVersion()); - } - - return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> { - if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) { - Log.error("Received older index update than was known for host: " - + from + " " + header.getName()); - throw new OutdatedUpdateException(); - } - - String rcv = ""; - for (var e : header.getChangelog().getEntriesList()) { - rcv += e.getHost() + ": " + e.getVersion() + "; "; - } - String ours = ""; - for (var e : md.getChangelog().entrySet()) { - ours += e.getKey() + ": " + e.getValue() + "; "; - } - Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv); - - boolean updatedRemoteVersion = false; - - var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer); - if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true; - - boolean hasLower = false; - boolean hasHigher = false; - for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) { - if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L)) - hasLower = true; - if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L)) - hasHigher = true; - } - - if (hasLower && hasHigher) { - Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from); - return true; - } - - if (hasLower) { - Log.info("Received older index update than known: " - + from + " " + header.getName()); - throw new OutdatedUpdateException(); - } - - if (hasHigher) { - invalidate.apply(); - md.getChangelog().clear(); - md.getChangelog().putAll(receivedMap); 
- md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L); - if (header.hasPushedData()) - found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); - return false; - } else if (data == null && header.hasPushedData()) { - found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY); - if (found.getData() == null) - found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData())); - } - - assert Objects.equals(receivedTotalVer, md.getOurVersion()); - - if (!updatedRemoteVersion) - Log.debug("No action on update: " + header.getName() + " from " + from); - - return false; - }); - }); - - // TODO: Is the lock gap here ok? - if (conflict) { - Log.info("Trying conflict resolution: " + header.getName() + " from " + from); - var found = foundExt.get(); - - JObjectData theirsData; - ObjectHeader theirsHeader; - if (header.hasPushedData()) { - theirsHeader = header; - theirsData = dataProtoSerializer.deserialize(header.getPushedData()); - } else { - var got = remoteObjectServiceClient.getSpecificObject(from, header.getName()); - theirsData = dataProtoSerializer.deserialize(got.getRight()); - theirsHeader = got.getLeft(); - } - - jObjectTxManager.executeTx(() -> { - var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - if (d == null) - throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName())); - return d.getConflictResolver(); - }); - var resolver = conflictResolvers.select(resolverClass); - resolver.get().resolve(from, theirsHeader, theirsData, found); - }); - Log.info("Resolved conflict for " + from + " " + header.getName()); - } - - } - - public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) { - // TODO: Dedup - try { - handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader()); - } catch (OutdatedUpdateException ignored) { - Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid()); - invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName()); - } catch (Exception ex) { - Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex); - throw ex; - } - - return IndexUpdateReply.getDefaultInstance(); - } - - protected static class OutdatedUpdateException extends RuntimeException { - OutdatedUpdateException() { - super(); - } - - OutdatedUpdateException(String message) { - super(message); - } - - @Override - public synchronized Throwable fillInStackTrace() { - return this; - } - } -} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java deleted file mode 100644 index 21843ddc..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeerState.java +++ /dev/null @@ -1,34 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import lombok.AllArgsConstructor; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.Setter; - -@AllArgsConstructor -@NoArgsConstructor -public class TransientPeerState { - @Getter - @Setter - private boolean _reachable = false; - @Getter - @Setter - private String _addr; - @Getter - @Setter - private int _port; - @Getter - @Setter - private int _securePort; - - public 
TransientPeerState(boolean reachable) { - _reachable = reachable; - } - - public TransientPeerState(TransientPeerState source) { - _reachable = source._reachable; - _addr = source._addr; - _port = source._port; - _securePort = source._securePort; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java deleted file mode 100644 index d5773451..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersState.java +++ /dev/null @@ -1,32 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -public class TransientPeersState { - private final TransientPeersStateData _data = new TransientPeersStateData(); - private final ReadWriteLock _lock = new ReentrantReadWriteLock(); - - public R runReadLocked(TransientPeersStaten fn) { - _lock.readLock().lock(); - try { - return fn.apply(_data); - } finally { - _lock.readLock().unlock(); - } - } - - public R runWriteLocked(TransientPeersStaten fn) { - _lock.writeLock().lock(); - try { - return fn.apply(_data); - } finally { - _lock.writeLock().unlock(); - } - } - - @FunctionalInterface - public interface TransientPeersStaten { - R apply(TransientPeersStateData hostsData); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java deleted file mode 100644 index 974dd5d9..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/TransientPeersStateData.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.usatiuk.dhfs.objects.repository; - -import lombok.Getter; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.UUID; - - -public class TransientPeersStateData { - - @Getter - private final Map _states = new LinkedHashMap<>(); - - TransientPeerState get(UUID host) { - return _states.computeIfAbsent(host, k -> new TransientPeerState()); - } - - TransientPeerState getCopy(UUID host) { - return new TransientPeerState(get(host)); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java deleted file mode 100644 index 0220c443..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/autosync/AutoSyncProcessor.java +++ /dev/null @@ -1,122 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.autosync; - -import com.usatiuk.dhfs.objects.jrepository.*; -import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory; -import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.Startup; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - 
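TransientPeersState above and PersistentRemoteHosts earlier repeat the same read/write-locked holder idiom, differing only in the held type and the name of the callback interface. A generic version could collapse both; a sketch, with invented names:

```java
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;

// Generic form of the runReadLocked/runWriteLocked idiom used by
// TransientPeersState and PersistentRemoteHosts (sketch, not project code).
final class Guarded<T> {
    private final T _data;
    private final ReadWriteLock _lock = new ReentrantReadWriteLock();

    Guarded(T data) {
        _data = data;
    }

    <R> R read(Function<T, R> fn) {
        _lock.readLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.readLock().unlock();
        }
    }

    <R> R write(Function<T, R> fn) {
        _lock.writeLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.writeLock().unlock();
        }
    }
}
```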
-@ApplicationScoped -public class AutoSyncProcessor { - private final HashSetDelayedBlockingQueue _pending = new HashSetDelayedBlockingQueue<>(0); - private final HashSetDelayedBlockingQueue _retries = new HashSetDelayedBlockingQueue<>(10000); //FIXME: - @Inject - JObjectManager jObjectManager; - @ConfigProperty(name = "dhfs.objects.autosync.threads") - int autosyncThreads; - @ConfigProperty(name = "dhfs.objects.autosync.download-all") - boolean downloadAll; - @Inject - ExecutorService executorService; - @Inject - JObjectTxManager jObjectTxManager; - private ExecutorService _autosyncExcecutor; - - @Startup - void init() { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("autosync-%d") - .build(); - - _autosyncExcecutor = Executors.newFixedThreadPool(autosyncThreads, factory); - for (int i = 0; i < autosyncThreads; i++) { - _autosyncExcecutor.submit(this::autosync); - } - - if (downloadAll) { - jObjectManager.registerMetaWriteListener(JObjectData.class, this::alwaysSaveCallback); - } else { - jObjectManager.registerMetaWriteListener(PersistentPeerInfo.class, this::alwaysSaveCallback); - jObjectManager.registerMetaWriteListener(PeerDirectory.class, this::alwaysSaveCallback); - } - - if (downloadAll) - executorService.submit(() -> { - for (var obj : jObjectManager.findAll()) { - var got = jObjectManager.get(obj); - if (got.isEmpty() || !got.get().getMeta().isHaveLocalCopy()) - add(obj); - } - }); - } - - private void alwaysSaveCallback(JObject obj) { - obj.assertRwLock(); - if (obj.getMeta().isDeleted()) return; - if (obj.getData() != null) return; - if (obj.getMeta().isHaveLocalCopy()) return; - - add(obj.getMeta().getName()); - } - - void shutdown(@Observes @Priority(10) ShutdownEvent event) { - _autosyncExcecutor.shutdownNow(); - } - - public void add(String name) { - _pending.add(name); - } - - private void autosync() { - try { - while (!Thread.interrupted()) { - String name = null; - - while (name == null) { - name = _pending.tryGet(); - if (name == null) - name = _retries.tryGet(); - if (name == null) - name = _pending.get(1000L); //FIXME: - } - - try { - String finalName = name; - jObjectTxManager.executeTx(() -> { - jObjectManager.get(finalName).ifPresent(obj -> { - boolean ok = obj.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, i, v) -> { - if (m.isOnlyLocal()) return true; // FIXME: - if (m.isDeleted()) return true; - if (m.isDeletionCandidate()) return false; - if (obj.getMeta().isHaveLocalCopy()) return true; - return obj.tryResolve(JObjectManager.ResolutionStrategy.REMOTE); - }); - if (!ok) { - Log.debug("Failed downloading object " + obj.getMeta().getName() + ", will retry."); - _retries.add(obj.getMeta().getName()); - } - }); - }); - } catch (DeletedObjectAccessException ignored) { - } catch (Exception e) { - Log.debug("Failed downloading object " + name + ", will retry.", e); - _retries.add(name); - } - } - } catch (InterruptedException ignored) { - } - Log.info("Autosync thread exiting"); - - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java deleted file mode 100644 index 63f3e7a1..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueData.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.invalidation; - -import 
lombok.Getter; -import org.apache.commons.collections4.MultiValuedMap; -import org.apache.commons.collections4.multimap.HashSetValuedHashMap; - -import java.io.Serial; -import java.io.Serializable; -import java.util.UUID; - -public class DeferredInvalidationQueueData implements Serializable { - @Serial - private static final long serialVersionUID = 1L; - - @Getter - private final MultiValuedMap _deferredInvalidations = new HashSetValuedHashMap<>(); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java deleted file mode 100644 index e62e4d19..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/DeferredInvalidationQueueService.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.invalidation; - -import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.dhfs.objects.repository.PeerManager; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import io.quarkus.scheduler.Scheduled; -import io.smallrye.common.annotation.Blocking; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.SerializationUtils; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.UUID; - -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -@ApplicationScoped -public class DeferredInvalidationQueueService { - private static final String dataFileName = "invqueue"; - @Inject - PeerManager remoteHostManager; - @Inject - InvalidationQueueService invalidationQueueService; - @ConfigProperty(name = "dhfs.objects.root") - String dataRoot; - // FIXME: DB when? 
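DeferredInvalidationQueueData keys deferred invalidations by host UUID in a set-valued multimap, so re-deferring the same object name for a host is a no-op, and `returnForHost` below drains a host's entries through the live collection view that `get` returns. A small demonstration of those commons-collections4 semantics, with invented values:

```java
import org.apache.commons.collections4.MultiValuedMap;
import org.apache.commons.collections4.multimap.HashSetValuedHashMap;

import java.util.UUID;

public class MultiMapDemo {
    public static void main(String[] args) {
        MultiValuedMap<UUID, String> deferred = new HashSetValuedHashMap<>();
        UUID host = UUID.randomUUID();

        deferred.put(host, "obj_a");
        deferred.put(host, "obj_a"); // set-valued: the duplicate is dropped
        deferred.put(host, "obj_b");
        System.out.println(deferred.get(host)); // [obj_a, obj_b], unordered

        deferred.get(host).clear(); // live view: clears this host's entries
        System.out.println(deferred.get(host).isEmpty()); // true
    }
}
```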
-    private DeferredInvalidationQueueData _persistentData = new DeferredInvalidationQueueData();
-
-    void init(@Observes @Priority(290) StartupEvent event) throws IOException {
-        Paths.get(dataRoot).toFile().mkdirs();
-        Log.info("Initializing with root " + dataRoot);
-        if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
-            Log.info("Reading invalidation queue");
-            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
-        } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) {
-            Log.warn("Reading invalidation queue from backup");
-            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName + ".bak")));
-        }
-        remoteHostManager.registerConnectEventListener(this::returnForHost);
-    }
-
-    void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException {
-        Log.info("Saving deferred invalidations");
-        writeData();
-        Log.info("Saved deferred invalidations");
-    }
-
-    private void writeData() {
-        try {
-            if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists())
-                Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING);
-            Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData));
-        } catch (IOException iex) {
-            Log.error("Error writing deferred invalidations data", iex);
-            throw new RuntimeException(iex);
-        }
-    }
-
-    // FIXME: hardcoded interval
-    @Scheduled(every = "15s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
-    @Blocking
-    void periodicReturn() {
-        for (var reachable : remoteHostManager.getAvailableHosts())
-            returnForHost(reachable);
-    }
-
-    void returnForHost(UUID host) {
-        synchronized (this) {
-            var col = _persistentData.getDeferredInvalidations().get(host);
-            for (var s : col) {
-                Log.trace("Un-deferred invalidation to " + host + " of " + s);
-                invalidationQueueService.pushDeferredInvalidations(host, s);
-            }
-            col.clear();
-        }
-    }
-
-    void defer(UUID host, String object) {
-        synchronized (this) {
-            Log.trace("Deferred invalidation to " + host + " of " + object);
-            _persistentData.getDeferredInvalidations().put(host, object);
-        }
-    }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java
deleted file mode 100644
index 1a1158b6..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueue.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.objects.repository.invalidation;
-
-public class InvalidationQueue {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java
deleted file mode 100644
index b5424c28..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/invalidation/InvalidationQueueService.java
+++ /dev/null
@@ -1,180 +0,0 @@
-package com.usatiuk.dhfs.objects.repository.invalidation;
-
-import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.repository.PeerManager;
-import
com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import io.vertx.core.impl.ConcurrentHashSet; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.commons.lang3.tuple.Pair; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -@ApplicationScoped -public class InvalidationQueueService { - private final HashSetDelayedBlockingQueue> _queue; - private final AtomicReference> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>()); - @Inject - PeerManager remoteHostManager; - @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - @Inject - JObjectManager jObjectManager; - @Inject - PersistentPeerDataService persistentPeerDataService; - @Inject - DeferredInvalidationQueueService deferredInvalidationQueueService; - @ConfigProperty(name = "dhfs.objects.invalidation.threads") - int threads; - private ExecutorService _executor; - private volatile boolean _shutdown = false; - - public InvalidationQueueService(@ConfigProperty(name = "dhfs.objects.invalidation.delay") int delay) { - _queue = new HashSetDelayedBlockingQueue<>(delay); - } - - void init(@Observes @Priority(300) StartupEvent event) throws InterruptedException { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("invalidation-%d") - .build(); - - _executor = Executors.newFixedThreadPool(threads, factory); - - for (int i = 0; i < threads; i++) { - _executor.submit(this::sender); - } - } - - void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { - _shutdown = true; - _executor.shutdownNow(); - if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) { - Log.error("Failed to shut down invalidation sender thread"); - } - var data = _queue.close(); - Log.info("Will defer " + data.size() + " invalidations on shutdown"); - for (var e : data) - deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); - } - - private void sender() { - while (!_shutdown) { - try { - try { - if (!_queue.hasImmediate()) { - ConcurrentHashSet toAllQueue; - - while (true) { - toAllQueue = _toAllQueue.get(); - if (toAllQueue != null) { - if (_toAllQueue.compareAndSet(toAllQueue, null)) - break; - } else { - break; - } - } - - if (toAllQueue != null) { - var hostInfo = remoteHostManager.getHostStateSnapshot(); - for (var o : toAllQueue) { - for (var h : hostInfo.available()) - _queue.add(Pair.of(h, o)); - for (var u : hostInfo.unavailable()) - deferredInvalidationQueueService.defer(u, o); - } - } - } - - var data = _queue.getAllWait(100, _queue.getDelay()); // TODO: config? 
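The `_toAllQueue` dance just above deserves a note: producers add into the current set while the sender detaches the entire set with a compare-and-set, so draining never blocks producers. A standalone sketch of the same pattern (class and method names are mine, not from the code):

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

// Detach-with-CAS: producers add to the current set, the consumer swaps the
// whole set out and drains it without ever blocking producers.
class SwapQueue<T> {
    private final AtomicReference<Set<T>> ref =
            new AtomicReference<>(ConcurrentHashMap.newKeySet());

    void add(T item) {
        while (true) {
            Set<T> cur = ref.get();
            if (cur == null) { // consumer just detached it; install a fresh set
                ref.compareAndSet(null, ConcurrentHashMap.newKeySet());
                continue;
            }
            cur.add(item);
            // If the set is still current, the consumer will see the item.
            // Otherwise retry into the new set; the rare duplicate delivery
            // is harmless because invalidations are idempotent.
            if (ref.get() == cur) return;
        }
    }

    Set<T> drain() {
        while (true) {
            Set<T> cur = ref.get();
            if (cur == null) return Set.of();             // nothing pending
            if (ref.compareAndSet(cur, null)) return cur; // detached, safe to iterate
        }
    }
}
```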
- if (data.isEmpty()) continue; - String stats = "Sent invalidation: "; - long success = 0; - - for (var e : data) { - if (!persistentPeerDataService.existsHost(e.getLeft())) continue; - - if (!remoteHostManager.isReachable(e.getLeft())) { - deferredInvalidationQueueService.defer(e.getLeft(), e.getRight()); - continue; - } - - try { - jObjectManager.get(e.getRight()).ifPresent(obj -> { - remoteObjectServiceClient.notifyUpdate(obj, e.getLeft()); - }); - success++; - } catch (DeletedObjectAccessException ignored) { - } catch (Exception ex) { - Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex); - pushInvalidationToOne(e.getLeft(), e.getRight()); - } - if (_shutdown) { - Log.info("Invalidation sender exiting"); - break; - } - } - - stats += success + "/" + data.size() + " "; - Log.info(stats); - } catch (InterruptedException ie) { - throw ie; - } catch (Exception e) { - Log.error("Exception in invalidation sender thread: ", e); - } - } catch (InterruptedException ignored) { - } - } - Log.info("Invalidation sender exiting"); - } - - public void pushInvalidationToAll(JObject obj) { - if (obj.getMeta().isOnlyLocal()) return; - while (true) { - var queue = _toAllQueue.get(); - if (queue == null) { - var nq = new ConcurrentHashSet(); - if (!_toAllQueue.compareAndSet(null, nq)) continue; - queue = nq; - } - - queue.add(obj.getMeta().getName()); - - if (_toAllQueue.get() == queue) break; - } - } - - public void pushInvalidationToOne(UUID host, JObject obj) { - if (obj.getMeta().isOnlyLocal()) return; - if (remoteHostManager.isReachable(host)) - _queue.add(Pair.of(host, obj.getMeta().getName())); - else - deferredInvalidationQueueService.defer(host, obj.getMeta().getName()); - } - - public void pushInvalidationToAll(String name) { - pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); - } - - public void pushInvalidationToOne(UUID host, String name) { - pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found"))); - } - - protected void pushDeferredInvalidations(UUID host, String name) { - _queue.add(Pair.of(host, name)); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java deleted file mode 100644 index 4feec145..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/Op.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.opsupport; - -import com.usatiuk.autoprotomap.runtime.ProtoMirror; -import com.usatiuk.dhfs.objects.repository.OpPushPayload; - -import java.util.Collection; - -@ProtoMirror(OpPushPayload.class) -public interface Op { - Collection getEscapedRefs(); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java deleted file mode 100644 index edc13484..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObject.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.opsupport; - -import java.util.List; -import java.util.UUID; - -public interface OpObject { - String getId(); - - boolean hasPendingOpsForHost(UUID host); - - List getPendingOpsForHost(UUID host, int limit); - - 
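The same routing rule appears three times in this service (the sender loop above, `pushInvalidationToOne`, and shutdown): deliver immediately when the peer is reachable, otherwise park the invalidation durably. A compact sketch of that decision, with the constructor arguments as hypothetical stand-ins for the real collaborators:

```java
import java.util.UUID;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

// Route an invalidation: reachable peers get an immediate (delay-batched)
// send, unreachable ones a durable deferral replayed on reconnect.
class InvalidationRouter {
    private final Predicate<UUID> reachable;        // e.g. remoteHostManager::isReachable
    private final BiConsumer<UUID, String> sendNow; // e.g. the delayed queue's add
    private final BiConsumer<UUID, String> defer;   // e.g. deferredInvalidationQueueService::defer

    InvalidationRouter(Predicate<UUID> reachable,
                       BiConsumer<UUID, String> sendNow,
                       BiConsumer<UUID, String> defer) {
        this.reachable = reachable;
        this.sendNow = sendNow;
        this.defer = defer;
    }

    void push(UUID host, String objectName) {
        if (reachable.test(host)) sendNow.accept(host, objectName);
        else defer.accept(host, objectName);
    }
}
```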
void commitOpForHost(UUID host, Op op); - - void pushBootstrap(UUID host); - - boolean acceptExternalOp(UUID from, Op op); - - Op getPeriodicPushOp(); - - void addToTx(); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java deleted file mode 100644 index d62de3bf..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpObjectRegistry.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.opsupport; - -import com.usatiuk.dhfs.objects.repository.PeerManager; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; - -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -@ApplicationScoped -public class OpObjectRegistry { - private final ConcurrentHashMap _objects = new ConcurrentHashMap<>(); - @Inject - OpSender opSender; - @Inject - PeerManager remoteHostManager; - - public void registerObject(OpObject obj) { - _objects.put(obj.getId(), obj); - remoteHostManager.registerConnectEventListener(host -> { - opSender.push(obj); - }); - } - - public void acceptExternalOps(String objId, UUID from, List ops) { - var got = _objects.get(objId); - if (got == null) - throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Queue with id " + objId + " not registered")); - got.addToTx(); - boolean push = false; - for (Op op : ops) - push |= got.acceptExternalOp(from, op); - if (push) - opSender.push(got); - } - - public void pushBootstrapData(UUID host) { - for (var o : _objects.values()) { - // FIXME: Split transactions for objects? 
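The `OpObject` contract above (peek with `getPendingOpsForHost`, acknowledge with `commitOpForHost`) is what makes delivery at-least-once: `OpSender` below pushes a batch and commits it locally only after the peer returns. A sketch of that loop against pared-down, hypothetical stand-ins for the interfaces:

```java
import java.util.List;

// Stand-ins for OpObject and the remote client used by OpSender below.
interface PendingLog<O> {
    List<O> peek(int max);      // like getPendingOpsForHost: read, don't consume
    void commit(List<O> batch); // like commitOpForHost: consume after the peer has it
}

interface Remote<O> {
    void pushOps(List<O> batch); // returns only once the peer persisted the batch
}

class AtLeastOnceSender {
    // A crash between pushOps and commit just re-sends the same batch later,
    // so ops must be idempotent on the receiving side.
    static <O> void drain(PendingLog<O> log, Remote<O> remote, int batchSize) {
        List<O> batch;
        while (!(batch = log.peek(batchSize)).isEmpty()) {
            remote.pushOps(batch); // peer acknowledges the batch
            log.commit(batch);     // only now consume it locally
        }
    }
}
```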
- o.addToTx(); - o.pushBootstrap(host); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java deleted file mode 100644 index 3bf3b647..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/opsupport/OpSender.java +++ /dev/null @@ -1,109 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.opsupport; - -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.dhfs.objects.repository.PeerManager; -import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.Startup; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -@ApplicationScoped -public class OpSender { - private static final int _threads = 1; - private final HashSetDelayedBlockingQueue _queue = new HashSetDelayedBlockingQueue<>(0); // FIXME: - @Inject - PeerManager remoteHostManager; - @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - @Inject - JObjectTxManager jObjectTxManager; - @ConfigProperty(name = "dhfs.objects.opsender.batch-size") - int batchSize; - private ExecutorService _executor; - private volatile boolean _shutdown = false; - - @Startup - void init() { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("opsender-%d") - .build(); - - _executor = Executors.newFixedThreadPool(_threads, factory); - - for (int i = 0; i < _threads; i++) { - _executor.submit(this::sender); - } - } - - void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { - _shutdown = true; - _executor.shutdownNow(); - if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) { - Log.error("Failed to shut down op sender thread"); - } - } - - private void sender() { - while (!_shutdown) { - try { - var got = _queue.get(); - for (var h : remoteHostManager.getAvailableHosts()) { - sendForHost(got, h); - } - } catch (InterruptedException ignored) { - } catch (Throwable ex) { - Log.error("In op sender: ", ex); - } - } - } - - void sendForHost(OpObject obj, UUID host) { - // Must be peeked before getPendingOpForHost - var periodicPushOp = obj.getPeriodicPushOp(); - - if (!obj.hasPendingOpsForHost(host)) { - if (periodicPushOp == null) return; - try { - remoteObjectServiceClient.pushOps(List.of(periodicPushOp), obj.getId(), host); - Log.debug("Sent periodic op update to " + host + " of " + obj.getId()); - } catch (Throwable e) { - Log.warn("Error pushing periodic op for " + host + " of " + obj.getId(), e); - } - return; - } - - while (obj.hasPendingOpsForHost(host)) { - List collected = obj.getPendingOpsForHost(host, batchSize); - try { - // The peer should finish the call only if it had persisted everything - remoteObjectServiceClient.pushOps(collected, obj.getId(), host); - // If we crash here, it's ok, the peer will just skip these ops the next time we send them - jObjectTxManager.executeTx(() 
-> { - obj.addToTx(); - for (var op : collected) - obj.commitOpForHost(host, op); - }); - Log.info("Sent " + collected.size() + " op updates to " + host + " of " + obj.getId()); - } catch (Throwable e) { - Log.warn("Error sending op to " + host, e); - } - } - } - - public void push(OpObject queue) { - _queue.readd(queue); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java deleted file mode 100644 index 07443419..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryBroadcaster.java +++ /dev/null @@ -1,117 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peerdiscovery; - -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import io.quarkus.arc.properties.IfBuildProperty; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.Startup; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.net.*; - -@ApplicationScoped -@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true") -public class LocalPeerDiscoveryBroadcaster { - - @Inject - PersistentPeerDataService persistentPeerDataService; - - @ConfigProperty(name = "quarkus.http.port") - Integer ourPort; - - @ConfigProperty(name = "quarkus.http.ssl-port") - Integer ourSecurePort; - - @ConfigProperty(name = "dhfs.objects.peerdiscovery.port") - Integer broadcastPort; - - @ConfigProperty(name = "dhfs.objects.peerdiscovery.interval") - Integer broadcastInterval; - - private Thread _broadcasterThread; - - private DatagramSocket _socket; - - @Startup - void init() throws SocketException { - _socket = new DatagramSocket(); - _socket.setBroadcast(true); - - _broadcasterThread = new Thread(this::broadcast); - _broadcasterThread.setName("LocalPeerDiscoveryBroadcaster"); - _broadcasterThread.start(); - } - - void shutdown(@Observes @Priority(10) ShutdownEvent event) { - _socket.close(); - _broadcasterThread.interrupt(); - while (_broadcasterThread.isAlive()) { - try { - _broadcasterThread.join(); - } catch (InterruptedException ignored) { - } - } - } - - private void broadcast() { - try { - while (!Thread.interrupted() && !_socket.isClosed()) { - Thread.sleep(broadcastInterval); - - try { - var sendData = PeerDiscoveryInfo.newBuilder() - .setUuid(persistentPeerDataService.getSelfUuid().toString()) - .setPort(ourPort) - .setSecurePort(ourSecurePort) - .build(); - - var sendBytes = sendData.toByteArray(); - - DatagramPacket sendPacket - = new DatagramPacket(sendBytes, sendBytes.length, - InetAddress.getByName("255.255.255.255"), broadcastPort); - - _socket.send(sendPacket); - - var interfaces = NetworkInterface.getNetworkInterfaces(); - while (interfaces.hasMoreElements()) { - NetworkInterface networkInterface = interfaces.nextElement(); - - try { - if (networkInterface.isLoopback() || !networkInterface.isUp()) { - continue; - } - } catch (Exception e) { - continue; - } - - for (InterfaceAddress interfaceAddress : networkInterface.getInterfaceAddresses()) { - InetAddress broadcast = interfaceAddress.getBroadcast(); - if (broadcast == null) { - continue; - } - - try { - sendPacket = new DatagramPacket(sendBytes, 
sendBytes.length, broadcast, broadcastPort); - _socket.send(sendPacket); - } catch (Exception ignored) { - continue; - } - -// Log.trace(getClass().getName() + "Broadcast sent to: " + broadcast.getHostAddress() -// + ", at: " + networkInterface.getDisplayName()); - } - } - - } catch (Exception ignored) { - } - } - } catch (InterruptedException ignored) { - } - Log.info("PeerDiscoveryBroadcaster stopped"); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java deleted file mode 100644 index c46ccee1..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peerdiscovery/LocalPeerDiscoveryClient.java +++ /dev/null @@ -1,73 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peerdiscovery; - -import com.google.protobuf.InvalidProtocolBufferException; -import com.usatiuk.dhfs.objects.repository.PeerManager; -import io.quarkus.arc.properties.IfBuildProperty; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.Startup; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import jakarta.inject.Inject; - -import java.net.*; -import java.nio.ByteBuffer; -import java.util.UUID; - -@ApplicationScoped -@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true") -public class LocalPeerDiscoveryClient { - - @Inject - PeerManager remoteHostManager; - - private Thread _clientThread; - - private DatagramSocket _socket; - - @Startup - void init() throws SocketException, UnknownHostException { - _socket = new DatagramSocket(42069, InetAddress.getByName("0.0.0.0")); - _socket.setBroadcast(true); - - _clientThread = new Thread(this::client); - _clientThread.setName("LocalPeerDiscoveryClient"); - _clientThread.start(); - } - - void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException { - _socket.close(); - _clientThread.interrupt(); - while (_clientThread.isAlive()) { - try { - _clientThread.join(); - } catch (InterruptedException ignored) { - } - } - } - - private void client() { - while (!Thread.interrupted() && !_socket.isClosed()) { - try { - byte[] buf = new byte[10000]; - DatagramPacket packet = new DatagramPacket(buf, buf.length); - _socket.receive(packet); - - try { - var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength())); - - remoteHostManager.notifyAddr(UUID.fromString(got.getUuid()), packet.getAddress().getHostAddress(), got.getPort(), got.getSecurePort()); - - } catch (InvalidProtocolBufferException e) { - continue; - } - } catch (Exception ex) { - Log.error(ex); - } - } - Log.info("PeerDiscoveryClient stopped"); - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java deleted file mode 100644 index 6479f13e..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectory.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.PushResolution; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; 
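Stepping back: the two discovery classes above are a UDP broadcast fan-out plus a listener on the same port (note that the listener hardcodes port 42069 while the broadcaster's port is configurable, so the two settings must agree). A self-contained round trip of the mechanism, with a plain string payload standing in for the `PeerDiscoveryInfo` protobuf:

```java
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;

public class DiscoveryDemo {
    public static void main(String[] args) throws Exception {
        int port = 42069; // the client above binds this port on 0.0.0.0

        try (DatagramSocket listener = new DatagramSocket(port);
             DatagramSocket sender = new DatagramSocket()) {
            sender.setBroadcast(true);

            byte[] payload = "uuid=demo;port=8080".getBytes(StandardCharsets.UTF_8);
            sender.send(new DatagramPacket(payload, payload.length,
                    InetAddress.getByName("255.255.255.255"), port));

            // Blocks until the broadcast arrives; local delivery works on
            // typical setups but may need a permissive firewall.
            byte[] buf = new byte[1500];
            DatagramPacket packet = new DatagramPacket(buf, buf.length);
            listener.receive(packet);
            System.out.println("Discovered: "
                    + new String(buf, 0, packet.getLength(), StandardCharsets.UTF_8)
                    + " from " + packet.getAddress().getHostAddress());
        }
    }
}
```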
-import lombok.Getter; - -import java.io.Serial; -import java.util.Collection; -import java.util.LinkedHashSet; -import java.util.Set; -import java.util.UUID; - -@PushResolution -public class PeerDirectory extends JObjectData { - public static final String PeerDirectoryObjName = "peer_directory"; - @Serial - private static final long serialVersionUID = 1; - @Getter - private final Set _peers = new LinkedHashSet<>(); - - @Override - public String getName() { - return PeerDirectoryObjName; - } - - @Override - public Class getConflictResolver() { - return PeerDirectoryConflictResolver.class; - } - - @Override - public Class getRefType() { - return PersistentPeerInfo.class; - } - - @Override - public Collection extractRefs() { - return _peers.stream().map(PersistentPeerInfo::getNameFromUuid).toList(); - } - - @Override - public int estimateSize() { - return _peers.size() * 32; - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java deleted file mode 100644 index 4c80234b..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryConflictResolver.java +++ /dev/null @@ -1,73 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.dhfs.objects.jrepository.JObject; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import com.usatiuk.dhfs.objects.repository.ObjectHeader; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; -import org.apache.commons.lang3.NotImplementedException; - -import java.util.*; - -@ApplicationScoped -public class PeerDirectoryConflictResolver implements ConflictResolver { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Inject - RemoteObjectServiceClient remoteObjectServiceClient; - - @Inject - JObjectManager jObjectManager; - - @Override - public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject ours) { - var theirsDir = (PeerDirectory) theirsData; - if (!theirsDir.getClass().equals(PeerDirectory.class)) { - Log.error("Object type mismatch!"); - throw new NotImplementedException(); - } - - ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursDirU, bump, invalidate) -> { - if (oursDirU == null) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy")); - if (!(oursDirU instanceof PeerDirectory oursPD)) - throw new NotImplementedException("Type conflict for " + ours.getMeta().getName() + ", directory was expected"); - - LinkedHashSet mergedChildren = new LinkedHashSet<>(oursPD.getPeers()); - mergedChildren.addAll(theirsDir.getPeers()); - Map newChangelog = new LinkedHashMap<>(m.getChangelog()); - - for (var entry : theirsHeader.getChangelog().getEntriesList()) { - newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max); - } - - boolean wasChanged = oursPD.getPeers().size() != mergedChildren.size(); - - if (m.getBestVersion() > 
newChangelog.values().stream().reduce(0L, Long::sum)) - throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving")); - - if (wasChanged) { - newChangelog.merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum); - - for (var child : mergedChildren) { - if (!oursPD.getPeers().contains(child)) { - jObjectManager.getOrPut(PersistentPeerInfo.getNameFromUuid(child), PersistentPeerInfo.class, Optional.of(oursPD.getName())); - } - } - - oursPD.getPeers().addAll(mergedChildren); - } - - m.setChangelog(newChangelog); - return null; - }); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java deleted file mode 100644 index e3e54994..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocal.java +++ /dev/null @@ -1,27 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.OnlyLocal; -import lombok.Getter; - -import java.util.HashSet; -import java.util.UUID; - -@OnlyLocal -public class PeerDirectoryLocal extends JObjectData { - public static final String PeerDirectoryLocalObjName = "peer_directory_local"; - @Getter - private final HashSet _initialOpSyncDone = new HashSet<>(); - @Getter - private final HashSet _initialObjSyncDone = new HashSet<>(); - - @Override - public String getName() { - return PeerDirectoryLocalObjName; - } - - @Override - public int estimateSize() { - return 1024; //FIXME: - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java deleted file mode 100644 index ba2650e4..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectoryLocalSerializer.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.PeerDirectoryLocalP; -import jakarta.inject.Singleton; - -import java.util.Objects; -import java.util.UUID; - -@Singleton -public class PeerDirectoryLocalSerializer implements ProtoSerializer { - @Override - public PeerDirectoryLocal deserialize(PeerDirectoryLocalP message) { - var ret = new PeerDirectoryLocal(); - ret.getInitialOpSyncDone().addAll(message.getInitialOpSyncDonePeersList().stream().map(UUID::fromString).toList()); - ret.getInitialObjSyncDone().addAll(message.getInitialObjSyncDonePeersList().stream().map(UUID::fromString).toList()); - return ret; - } - - @Override - public PeerDirectoryLocalP serialize(PeerDirectoryLocal object) { - return PeerDirectoryLocalP.newBuilder() - .addAllInitialObjSyncDonePeers(() -> object.getInitialObjSyncDone().stream().map(Objects::toString).iterator()) - .addAllInitialOpSyncDonePeers(() -> object.getInitialOpSyncDone().stream().map(Objects::toString).iterator()).build(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java deleted file mode 100644 index da83117b..00000000 --- 
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerDirectorySerializer.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.PeerDirectoryP; -import jakarta.inject.Singleton; - -import java.util.Objects; -import java.util.UUID; - -@Singleton -public class PeerDirectorySerializer implements ProtoSerializer { - @Override - public PeerDirectory deserialize(PeerDirectoryP message) { - var ret = new PeerDirectory(); - message.getPeersList().stream().map(UUID::fromString).forEach(ret.getPeers()::add); - return ret; - } - - @Override - public PeerDirectoryP serialize(PeerDirectory object) { - return PeerDirectoryP.newBuilder().addAllPeers(() -> object.getPeers().stream().map(Objects::toString).iterator()).build(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java deleted file mode 100644 index e51e4d02..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerInfo.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -public record PeerInfo(String selfUuid, String cert) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java deleted file mode 100644 index e030b030..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApi.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import jakarta.inject.Inject; -import jakarta.ws.rs.GET; -import jakarta.ws.rs.Path; - -import java.security.cert.CertificateEncodingException; -import java.util.Base64; - -@Path("/peer-info") -public class PeerSyncApi { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Path("self") - @GET - public PeerInfo getSelfInfo() { - try { - return new PeerInfo(persistentPeerDataService.getSelfUuid().toString(), - Base64.getEncoder().encodeToString(persistentPeerDataService.getSelfCertificate().getEncoded())); - } catch (CertificateEncodingException e) { - throw new RuntimeException(e); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java deleted file mode 100644 index bb96a480..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClient.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import jakarta.ws.rs.GET; -import jakarta.ws.rs.Path; - -@Path("/peer-info") -public interface PeerSyncApiClient { - @Path("self") - @GET - PeerInfo getSelfInfo(); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java deleted file mode 100644 index f0d39caa..00000000 --- 
a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PeerSyncApiClientDynamic.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import io.quarkus.rest.client.reactive.QuarkusRestClientBuilder; -import jakarta.enterprise.context.ApplicationScoped; - -import java.net.URI; -import java.util.concurrent.TimeUnit; - -@ApplicationScoped -public class PeerSyncApiClientDynamic { - public PeerInfo getSelfInfo(String addr, int port) { - var client = QuarkusRestClientBuilder.newBuilder() - .baseUri(URI.create("http://" + addr + ":" + port)) - .connectTimeout(5, TimeUnit.SECONDS) - .readTimeout(5, TimeUnit.SECONDS) - .build(PeerSyncApiClient.class); - return client.getSelfInfo(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java deleted file mode 100644 index 81f7e29b..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfo.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver; -import com.usatiuk.dhfs.objects.jrepository.AssumedUnique; -import com.usatiuk.dhfs.objects.jrepository.JObjectData; -import com.usatiuk.dhfs.objects.jrepository.PushResolution; -import com.usatiuk.dhfs.objects.repository.ConflictResolver; -import lombok.AllArgsConstructor; -import lombok.EqualsAndHashCode; -import lombok.Getter; - -import java.io.Serial; -import java.security.cert.X509Certificate; -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -@Getter -@AllArgsConstructor -@EqualsAndHashCode(callSuper = false) -@PushResolution -@AssumedUnique -public class PersistentPeerInfo extends JObjectData { - @Serial - private static final long serialVersionUID = 1; - - private final UUID _uuid; - private final X509Certificate _certificate; - - public static String getNameFromUuid(UUID uuid) { - return "peer_" + uuid; - } - - @Override - public String getName() { - return getNameFromUuid(_uuid); - } - - @Override - public Class getConflictResolver() { - return NoOpConflictResolver.class; - } - - @Override - public Collection extractRefs() { - return List.of(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java deleted file mode 100644 index 924fbc6d..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peersync/PersistentPeerInfoSerializer.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peersync; - -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.autoprotomap.runtime.ProtoSerializer; -import com.usatiuk.dhfs.objects.persistence.PersistentPeerInfoP; -import com.usatiuk.dhfs.objects.repository.CertificateTools; -import jakarta.inject.Singleton; - -import java.security.cert.CertificateEncodingException; -import java.security.cert.CertificateException; -import java.util.UUID; - -@Singleton -public class PersistentPeerInfoSerializer implements ProtoSerializer { - @Override - public PersistentPeerInfo deserialize(PersistentPeerInfoP message) { - try { - return new PersistentPeerInfo( - 
UUID.fromString(message.getUuid()), - CertificateTools.certFromBytes(message.getCert().toByteArray()) - ); - } catch (CertificateException e) { - throw new RuntimeException(e); - } - } - - @Override - public PersistentPeerInfoP serialize(PersistentPeerInfo object) { - try { - return PersistentPeerInfoP.newBuilder() - .setUuid(object.getUuid().toString()) - .setCert(UnsafeByteOperations.unsafeWrap(object.getCertificate().getEncoded())) - .build(); - } catch (CertificateEncodingException e) { - throw new RuntimeException(e); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java deleted file mode 100644 index 2d3914f0..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerRolesAugmentor.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peertrust; - -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import io.quarkus.logging.Log; -import io.quarkus.security.credential.CertificateCredential; -import io.quarkus.security.identity.AuthenticationRequestContext; -import io.quarkus.security.identity.SecurityIdentity; -import io.quarkus.security.identity.SecurityIdentityAugmentor; -import io.quarkus.security.runtime.QuarkusSecurityIdentity; -import io.smallrye.mutiny.Uni; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; - -import java.util.UUID; -import java.util.function.Supplier; - -@ApplicationScoped -public class PeerRolesAugmentor implements SecurityIdentityAugmentor { - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Override - public Uni augment(SecurityIdentity identity, AuthenticationRequestContext context) { - return Uni.createFrom().item(build(identity)); - } - - private Supplier build(SecurityIdentity identity) { - if (identity.isAnonymous()) { - return () -> identity; - } else { - QuarkusSecurityIdentity.Builder builder = QuarkusSecurityIdentity.builder(identity); - - var uuid = identity.getPrincipal().getName().substring(3); - - try { - var entry = persistentPeerDataService.getHost(UUID.fromString(uuid)); - - if (!entry.getCertificate().equals(identity.getCredential(CertificateCredential.class).getCertificate())) { - Log.error("Certificate mismatch for " + uuid); - return () -> identity; - } - - builder.addRole("cluster-member"); - return builder::build; - } catch (Exception e) { - Log.error("Error when checking certificate for " + uuid, e); - return () -> identity; - } - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java deleted file mode 100644 index ae0d8359..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustManager.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peertrust; - -import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo; -import io.quarkus.logging.Log; -import jakarta.enterprise.context.ApplicationScoped; -import org.apache.commons.lang3.tuple.Pair; - -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.X509TrustManager; -import java.io.IOException; -import java.security.KeyStore; -import 
java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicReference; - -@ApplicationScoped -public class PeerTrustManager implements X509TrustManager { - private final AtomicReference trustManager = new AtomicReference<>(); - - @Override - public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { - trustManager.get().checkClientTrusted(chain, authType); - } - - @Override - public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { - trustManager.get().checkServerTrusted(chain, authType); - } - - @Override - public X509Certificate[] getAcceptedIssuers() { - return trustManager.get().getAcceptedIssuers(); - } - - public synchronized void reloadTrustManagerHosts(Collection hosts) { - try { - Log.info("Trying to reload trust manager: " + hosts.size() + " known hosts"); - reloadTrustManager(hosts.stream().map(hostInfo -> - Pair.of(hostInfo.getUuid().toString(), hostInfo.getCertificate())).toList()); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - private synchronized void reloadTrustManager(Collection> certs) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException { - KeyStore ts = KeyStore.getInstance(KeyStore.getDefaultType()); - ts.load(null, null); - - for (var cert : certs) { - ts.setCertificateEntry(cert.getLeft(), cert.getRight()); - } - - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ts); - - TrustManager[] tms = tmf.getTrustManagers(); - for (var tm : tms) { - if (tm instanceof X509TrustManager) { - trustManager.set((X509TrustManager) tm); - return; - } - } - - throw new NoSuchAlgorithmException("No X509TrustManager in TrustManagerFactory"); - } - -} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java deleted file mode 100644 index 167465f6..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/peertrust/PeerTrustServerCustomizer.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.peertrust; - - -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import io.quarkus.vertx.http.HttpServerOptionsCustomizer; -import io.vertx.core.http.HttpServerOptions; -import io.vertx.core.net.KeyCertOptions; -import io.vertx.core.net.TrustOptions; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.inject.Inject; - -import javax.net.ssl.KeyManagerFactory; -import java.security.KeyStore; -import java.security.cert.Certificate; - -@ApplicationScoped -public class PeerTrustServerCustomizer implements HttpServerOptionsCustomizer { - - @Inject - PeerTrustManager peerTrustManager; - - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Override - public void customizeHttpsServer(HttpServerOptions options) { - try { - KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(null, null); - - ks.setKeyEntry("sslkey", - persistentPeerDataService.getSelfKeypair().getPrivate(), null, - new Certificate[]{persistentPeerDataService.getSelfCertificate()}); - - KeyManagerFactory 
keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - keyManagerFactory.init(ks, null); - - options.setKeyCertOptions(KeyCertOptions.wrap(keyManagerFactory)); - options.setTrustOptions(TrustOptions.wrap(peerTrustManager)); - } catch (Exception e) { - throw new RuntimeException("Error configuring https: ", e); - } - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java deleted file mode 100644 index 493a8323..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/FileObjectPersistentStore.java +++ /dev/null @@ -1,541 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.persistence; - -import com.google.protobuf.ByteString; -import com.google.protobuf.CodedOutputStream; -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.utils.SerializationHelper; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; -import com.usatiuk.dhfs.supportlib.DhfsSupport; -import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer; -import com.usatiuk.dhfs.utils.ByteUtils; -import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import net.openhft.hashing.LongHashFunction; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import javax.annotation.Nonnull; -import java.io.*; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Objects; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -// File format: -// 64-bit metadata serialized size -// 64-bit offset of "rest of" metadata (if -1 then file has no data, -// if 0 then file has data and metadata fits into META_BLOCK_SIZE) -// Until META_BLOCK_SIZE - metadata (encoded as ObjectMetadataP) -// data (encoded as JObjectDataP) -// rest of metadata - -@ApplicationScoped -public class FileObjectPersistentStore implements ObjectPersistentStore { - private final int META_BLOCK_SIZE = DhfsSupport.PAGE_SIZE; - private final Path _root; - private final Path _txManifest; - private ExecutorService _flushExecutor; - private RandomAccessFile _txFile; - private volatile boolean _ready = false; - - public FileObjectPersistentStore(@ConfigProperty(name = "dhfs.objects.persistence.files.root") String root) { - this._root = Path.of(root).resolve("objects"); - _txManifest = Path.of(root).resolve("cur-tx-manifest"); - } - - void init(@Observes @Priority(100) StartupEvent event) throws IOException { - if (!_root.toFile().exists()) { - 
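Restating the file-format comment above as code may help: each object file begins with two big-endian longs, and the second one distinguishes three layouts. A sketch of the header arithmetic (names are mine; `readObject` and `readObjectMeta` below inline this logic):

```java
import java.nio.ByteBuffer;

class HeaderDecode {
    record Header(long metaSize, long metaOff) {}

    // The first 16 bytes: [metaSize][metaOff], both big-endian longs.
    // metaOff == -1: no data; metaOff == 0: metadata fits the first block;
    // metaOff > 0: offset of the metadata overflow that follows the data.
    static Header parse(byte[] firstSixteenBytes) {
        ByteBuffer buf = ByteBuffer.wrap(firstSixteenBytes);
        return new Header(buf.getLong(), buf.getLong());
    }

    // Length of the data region, given the header and total file length
    // (metaBlockSize is one page, matching META_BLOCK_SIZE above).
    static long dataLength(Header h, long fileLength, long metaBlockSize) {
        if (h.metaOff() < 0) return 0;                          // metadata only
        if (h.metaOff() > 0) return h.metaOff() - metaBlockSize; // overflow case
        return fileLength - metaBlockSize;                       // metaOff == 0
    }
}
```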
Log.info("Initializing with root " + _root); - _root.toFile().mkdirs(); - for (int i = 0; i < 256; i++) { - _root.resolve(String.valueOf(i)).toFile().mkdirs(); - } - } - if (!Files.exists(_txManifest)) { - Files.createFile(_txManifest); - } - _txFile = new RandomAccessFile(_txManifest.toFile(), "rw"); - { - BasicThreadFactory factory = new BasicThreadFactory.Builder() - .namingPattern("persistent-commit-%d") - .build(); - - _flushExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory); - } - - tryReplay(); - Log.info("Transaction replay done"); - _ready = true; - } - - void shutdown(@Observes @Priority(900) ShutdownEvent event) throws IOException { - _ready = false; - Log.debug("Deleting manifest file"); - _txFile.close(); - Files.delete(_txManifest); - Log.debug("Manifest file deleted"); - } - - private void verifyReady() { - if (!_ready) throw new IllegalStateException("Wrong service order!"); - } - - private void tryReplay() { - var read = readTxManifest(); - if (read != null) - commitTxImpl(read, false); - } - - private Path getObjPath(@Nonnull String obj) { - int h = Objects.hash(obj); - int p1 = h & 0b00000000_00000000_11111111_00000000; - return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj); - } - - private Path getTmpObjPath(@Nonnull String obj) { - int h = Objects.hash(obj); - int p1 = h & 0b00000000_00000000_11111111_00000000; - return _root.resolve(String.valueOf(p1 >> 8)).resolve(obj + ".tmp"); - } - - private void findAllObjectsImpl(Collection out, Path path) { - var read = path.toFile().listFiles(); - if (read == null) return; - - for (var s : read) { - if (s.isDirectory()) { - findAllObjectsImpl(out, s.toPath()); - } else { - if (s.getName().endsWith(".tmp")) continue; // FIXME: - out.add(s.getName()); - } - } - } - - @Nonnull - @Override - public Collection findAllObjects() { - verifyReady(); - ArrayList out = new ArrayList<>(); - findAllObjectsImpl(out, _root); - return Collections.unmodifiableCollection(out); - } - - @Nonnull - @Override - public JObjectDataP readObject(String name) { - verifyReady(); - var path = getObjPath(name); - try (var rf = new RandomAccessFile(path.toFile(), "r")) { - var longBuf = new byte[8]; - rf.seek(8); - rf.readFully(longBuf); - int metaOff = Math.toIntExact(ByteUtils.bytesToLong(longBuf)); - - if (metaOff < 0) - throw new StatusRuntimeException(Status.NOT_FOUND); - - int toRead; - - if (metaOff > 0) - toRead = metaOff - META_BLOCK_SIZE; - else - toRead = Math.toIntExact(rf.length()) - META_BLOCK_SIZE; - - rf.seek(META_BLOCK_SIZE); - - ByteBuffer buf = UninitializedByteBuffer.allocateUninitialized(toRead); - fillBuffer(buf, rf.getChannel()); - buf.flip(); - - var bs = UnsafeByteOperations.unsafeWrap(buf); - // This way, the input will be considered "immutable" which would allow avoiding copies - // when parsing byte arrays - var ch = bs.newCodedInput(); - ch.enableAliasing(true); - return JObjectDataP.parseFrom(ch); - } catch (EOFException | FileNotFoundException | NoSuchFileException fx) { - throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - } catch (IOException e) { - Log.error("Error reading file " + path, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Nonnull - @Override - public ObjectMetadataP readObjectMeta(String name) { - verifyReady(); - var path = getObjPath(name); - try (var rf = new RandomAccessFile(path.toFile(), "r")) { - int len = Math.toIntExact(rf.length()); - var buf = UninitializedByteBuffer.allocateUninitialized(META_BLOCK_SIZE); 
- fillBuffer(buf, rf.getChannel()); - - buf.flip(); - int metaSize = Math.toIntExact(buf.getLong()); - int metaOff = Math.toIntExact(buf.getLong()); - - ByteBuffer extraBuf; - - if (metaOff > 0) { - extraBuf = UninitializedByteBuffer.allocateUninitialized(len - metaOff); - rf.seek(metaOff); - fillBuffer(extraBuf, rf.getChannel()); - } else if (metaOff < 0) { - if (len > META_BLOCK_SIZE) { - extraBuf = UninitializedByteBuffer.allocateUninitialized(len - META_BLOCK_SIZE); - fillBuffer(extraBuf, rf.getChannel()); - } else { - extraBuf = null; - } - } else { - extraBuf = null; - } - - ByteString bs = UnsafeByteOperations.unsafeWrap(buf.position(16).slice()); - if (extraBuf != null) { - extraBuf.flip(); - bs = bs.concat(UnsafeByteOperations.unsafeWrap(extraBuf)); - } - - bs = bs.substring(0, metaSize); - - return ObjectMetadataP.parseFrom(bs); - } catch (FileNotFoundException | NoSuchFileException fx) { - throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND); - } catch (IOException e) { - Log.error("Error reading file " + path, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - private void fillBuffer(ByteBuffer dst, FileChannel src) throws IOException { - int rem = dst.remaining(); - int readTotal = 0; - int readCur = 0; - while (readTotal < rem && (readCur = src.read(dst)) != -1) { - readTotal += readCur; - } - if (rem != readTotal) - throw new EOFException(); - } - - private void writeObjectImpl(Path path, ObjectMetadataP meta, JObjectDataP data, boolean sync) throws IOException { - try (var fsb = new FileOutputStream(path.toFile(), false)) { - int metaSize = meta.getSerializedSize() + 16; - int dataSize = data == null ? 0 : data.getSerializedSize(); - - // Avoids CodedOutputStream flushing all the time - var metaBb = UninitializedByteBuffer.allocateUninitialized(Math.max(META_BLOCK_SIZE, meta.getSerializedSize() + 16)); - metaBb.putLong(metaSize - 16); - if (data == null) - metaBb.putLong(-1); - else if (metaSize <= META_BLOCK_SIZE) - metaBb.putLong(0); - else - metaBb.putLong(META_BLOCK_SIZE + dataSize); - { - var metaBbOut = CodedOutputStream.newInstance(metaBb); - meta.writeTo(metaBbOut); - metaBbOut.flush(); - metaBb.flip(); - } - - if (fsb.getChannel().write(metaBb.limit(META_BLOCK_SIZE)) != META_BLOCK_SIZE) - throw new IOException("Could not write to file"); - - if (data != null) { - var dataBb = UninitializedByteBuffer.allocateUninitialized(dataSize); - var dataBbOut = CodedOutputStream.newInstance(dataBb); - data.writeTo(dataBbOut); - dataBbOut.flush(); - dataBb.flip(); - if (fsb.getChannel().write(dataBb) != dataSize) - throw new IOException("Could not write to file"); - } - - if (metaSize > META_BLOCK_SIZE) { - if (fsb.getChannel().write(metaBb.limit(metaSize).position(META_BLOCK_SIZE)) != metaSize - META_BLOCK_SIZE) - throw new IOException("Could not write to file"); - } - - if (sync) { - fsb.flush(); - fsb.getFD().sync(); - } - } - } - - private void writeObjectMetaImpl(Path path, ObjectMetadataP meta, boolean sync) throws IOException { - try (var rf = new RandomAccessFile(path.toFile(), "rw"); - var ch = rf.getChannel()) { - int len = Math.toIntExact(rf.length()); - - int metaSize = meta.getSerializedSize() + 16; - int dataSize; - int metaOff; - - if (len != 0) { - var buf = UninitializedByteBuffer.allocateUninitialized(META_BLOCK_SIZE); - fillBuffer(buf, rf.getChannel()); - - buf.flip(); - buf.position(8); - metaOff = Math.toIntExact(buf.getLong()); - } else { - metaOff = -1; - } - - if (metaOff > 0) { - dataSize = metaOff - 
META_BLOCK_SIZE; - } else if (metaOff < 0) { - dataSize = 0; - } else { - dataSize = len - META_BLOCK_SIZE; - } - - ch.truncate(Math.max(metaSize, META_BLOCK_SIZE) + dataSize); - ch.position(0); - - // Avoids CodedOutputStream flushing all the time - var metaBb = UninitializedByteBuffer.allocateUninitialized(Math.max(META_BLOCK_SIZE, meta.getSerializedSize() + 16)); - metaBb.putLong(metaSize - 16); - if (dataSize == 0) - metaBb.putLong(-1); - else if (metaSize <= META_BLOCK_SIZE) - metaBb.putLong(0); - else - metaBb.putLong(META_BLOCK_SIZE + dataSize); - { - var metaBbOut = CodedOutputStream.newInstance(metaBb); - meta.writeTo(metaBbOut); - metaBbOut.flush(); - metaBb.flip(); - } - - if (ch.write(metaBb.limit(META_BLOCK_SIZE)) != META_BLOCK_SIZE) - throw new IOException("Could not write to file"); - - if (metaSize > META_BLOCK_SIZE) { - ch.position(META_BLOCK_SIZE + dataSize); - if (ch.write(metaBb.limit(metaSize).position(META_BLOCK_SIZE)) != metaSize - META_BLOCK_SIZE) - throw new IOException("Could not write to file"); - } - - if (sync) - rf.getFD().sync(); - } - } - - @Override - public void writeObjectDirect(String name, ObjectMetadataP meta, JObjectDataP data) { - verifyReady(); - try { - var path = getObjPath(name); - writeObjectImpl(path, meta, data, false); - } catch (IOException e) { - Log.error("Error writing file " + name, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Override - public void writeObjectMetaDirect(String name, ObjectMetadataP meta) { - verifyReady(); - try { - var path = getObjPath(name); - writeObjectMetaImpl(path, meta, false); - } catch (IOException e) { - Log.error("Error writing file " + name, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Override - public void writeNewObject(String name, ObjectMetadataP meta, JObjectDataP data) { - verifyReady(); - try { - var tmpPath = getTmpObjPath(name); - writeObjectImpl(tmpPath, meta, data, true); - } catch (IOException e) { - Log.error("Error writing new file " + name, e); - } - } - - @Override - public void writeNewObjectMeta(String name, ObjectMetadataP meta) { - verifyReady(); - // TODO COW - try { - var path = getObjPath(name); - var tmpPath = getTmpObjPath(name); - if (path.toFile().exists()) - Files.copy(path, getTmpObjPath(name)); - writeObjectMetaImpl(tmpPath, meta, true); - } catch (IOException e) { - Log.error("Error writing new file meta " + name, e); - } - } - - private TxManifest readTxManifest() { - try { - var channel = _txFile.getChannel(); - - if (channel.size() == 0) - return null; - - channel.position(0); - - var buf = ByteBuffer.allocate(Math.toIntExact(channel.size())); - - fillBuffer(buf, channel); - buf.flip(); - - long checksum = buf.getLong(); - var data = buf.slice(); - var hash = LongHashFunction.xx3().hashBytes(data); - - if (hash != checksum) - throw new StatusRuntimeExceptionNoStacktrace(Status.DATA_LOSS.withDescription("Transaction manifest checksum mismatch!")); - - return SerializationHelper.deserialize(data.array(), data.arrayOffset()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private void putTxManifest(TxManifest manifest) { - try { - var channel = _txFile.getChannel(); - var data = SerializationHelper.serializeArray(manifest); - channel.truncate(data.length + 8); - channel.position(0); - var hash = LongHashFunction.xx3().hashBytes(data); - if (channel.write(ByteUtils.longToBb(hash)) != 8) - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - if 
(channel.write(ByteBuffer.wrap(data)) != data.length) - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - channel.force(true); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public void commitTx(TxManifest manifest) { - verifyReady(); - commitTxImpl(manifest, true); - } - - public void commitTxImpl(TxManifest manifest, boolean failIfNotFound) { - try { - if (manifest.getDeleted().isEmpty() && manifest.getWritten().isEmpty()) { - Log.debug("Empty manifest, skipping"); - return; - } - - putTxManifest(manifest); - - var latch = new CountDownLatch(manifest.getWritten().size() + manifest.getDeleted().size()); - ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); - - for (var n : manifest.getWritten()) { - _flushExecutor.execute(() -> { - try { - Files.move(getTmpObjPath(n), getObjPath(n), ATOMIC_MOVE, REPLACE_EXISTING); - } catch (Throwable t) { - if (!failIfNotFound && (t instanceof NoSuchFileException)) return; - Log.error("Error writing " + n, t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - for (var d : manifest.getDeleted()) { - _flushExecutor.execute(() -> { - try { - deleteImpl(getObjPath(d)); - } catch (Throwable t) { - Log.error("Error deleting " + d, t); - errors.add(t); - } finally { - latch.countDown(); - } - }); - } - - latch.await(); - - if (!errors.isEmpty()) { - throw new RuntimeException("Errors when committing tx!"); - } - - // No real need to truncate here -// try (var channel = _txFile.getChannel()) { -// channel.truncate(0); -// } -// } catch (IOException e) { -// Log.error("Failed committing transaction to disk: ", e); -// throw new RuntimeException(e); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - private void deleteImpl(Path path) { - try { - Files.delete(path); - } catch (NoSuchFileException ignored) { - } catch (IOException e) { - Log.error("Error deleting file " + path, e); - throw new StatusRuntimeExceptionNoStacktrace(Status.INTERNAL); - } - } - - @Override - public void deleteObjectDirect(String name) { - verifyReady(); - deleteImpl(getObjPath(name)); - } - - @Override - public long getTotalSpace() { - verifyReady(); - return _root.toFile().getTotalSpace(); - } - - @Override - public long getFreeSpace() { - verifyReady(); - return _root.toFile().getFreeSpace(); - } - - @Override - public long getUsableSpace() { - verifyReady(); - return _root.toFile().getUsableSpace(); - } - -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java deleted file mode 100644 index 1f5f2af1..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.persistence; - -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; - -import javax.annotation.Nonnull; -import java.util.Collection; - -public interface ObjectPersistentStore { - @Nonnull - Collection findAllObjects(); - - @Nonnull - JObjectDataP readObject(String name); - - @Nonnull - ObjectMetadataP readObjectMeta(String name); - - void writeObjectDirect(String name, ObjectMetadataP meta, JObjectDataP data); - - void writeObjectMetaDirect(String name, ObjectMetadataP meta); - - void writeNewObject(String name, ObjectMetadataP 
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java deleted file mode 100644 index 1f5f2af1..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/ObjectPersistentStore.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.persistence; - -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; - -import javax.annotation.Nonnull; -import java.util.Collection; - -public interface ObjectPersistentStore { - @Nonnull - Collection<String> findAllObjects(); - - @Nonnull - JObjectDataP readObject(String name); - - @Nonnull - ObjectMetadataP readObjectMeta(String name); - - void writeObjectDirect(String name, ObjectMetadataP meta, JObjectDataP data); - - void writeObjectMetaDirect(String name, ObjectMetadataP meta); - - void writeNewObject(String name, ObjectMetadataP meta, JObjectDataP data); - - void writeNewObjectMeta(String name, ObjectMetadataP meta); - - void commitTx(TxManifest names); - - // Deletes object metadata and data - void deleteObjectDirect(String name); - - long getTotalSpace(); - - long getFreeSpace(); - - long getUsableSpace(); -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java deleted file mode 100644 index 2cbc7b5f..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/persistence/TxManifest.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.persistence; - -import java.io.Serializable; -import java.util.List; - -// FIXME: Serializable -public interface TxManifest extends Serializable { - List<String> getWritten(); - - List<String> getDeleted(); -}
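Since callers only ever ask a TxManifest for the two name lists, a minimal concrete implementation is just a serializable record. The one below is hypothetical (not part of the deleted sources) and assumes it lives in the same package as TxManifest:

import java.util.List;

// Hypothetical: the smallest possible TxManifest carrier.
record SimpleTxManifest(List<String> written, List<String> deleted) implements TxManifest {
    @Override
    public List<String> getWritten() {
        return written;
    }

    @Override
    public List<String> getDeleted() {
        return deleted;
    }
}

// e.g. store.commitTx(new SimpleTxManifest(List.of("objA"), List.of("objB")));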
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java deleted file mode 100644 index 3232b9f0..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/AvailablePeerInfo.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.webapi; - -public record AvailablePeerInfo(String uuid, String addr, int port) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java deleted file mode 100644 index 2d646474..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerDelete.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.webapi; - -public record KnownPeerDelete(String uuid) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java deleted file mode 100644 index 5fbd9eb7..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerInfo.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.webapi; - -public record KnownPeerInfo(String uuid) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java deleted file mode 100644 index f1e109f8..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/KnownPeerPut.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.webapi; - -public record KnownPeerPut(String uuid) { -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java deleted file mode 100644 index 4d8a3102..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/repository/webapi/ManagementApi.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.usatiuk.dhfs.objects.repository.webapi; - -import com.usatiuk.dhfs.objects.repository.PeerManager; -import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService; -import jakarta.inject.Inject; -import jakarta.ws.rs.DELETE; -import jakarta.ws.rs.GET; -import jakarta.ws.rs.PUT; -import jakarta.ws.rs.Path; - -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -@Path("/objects-manage") -public class ManagementApi { - @Inject - PeerManager remoteHostManager; - - @Inject - PersistentPeerDataService persistentPeerDataService; - - @Path("known-peers") - @GET - public List<KnownPeerInfo> knownPeers() { - return persistentPeerDataService.getHostsNoNulls().stream().map(h -> new KnownPeerInfo(h.getUuid().toString())).toList(); - } - - @Path("known-peers") - @PUT - public void addPeer(KnownPeerPut knownPeerPut) { - remoteHostManager.addRemoteHost(UUID.fromString(knownPeerPut.uuid())); - } - - @Path("known-peers") - @DELETE - public void deletePeer(KnownPeerDelete knownPeerDelete) { - remoteHostManager.removeRemoteHost(UUID.fromString(knownPeerDelete.uuid())); - } - - @Path("available-peers") - @GET - public Collection<AvailablePeerInfo> availablePeers() { - return remoteHostManager.getSeenButNotAddedHosts(); - } -} diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java deleted file mode 100644 index 2f285c42..00000000 --- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/webui/WebUiRouter.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.usatiuk.dhfs.webui; - -import io.quarkus.runtime.StartupEvent; -import io.vertx.core.http.HttpServerRequest; -import io.vertx.ext.web.Router; -import io.vertx.ext.web.RoutingContext; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Optional; - -@ApplicationScoped -public class WebUiRouter { - - @ConfigProperty(name = "dhfs.webui.root") - Optional<String> root; - - void installRoute(@Observes StartupEvent startupEvent, Router router) { - root.ifPresent(r -> { - router.route().path("/").handler(ctx -> ctx.redirect("/webui")); - router.route() - .path("/webui/*") - .handler(this::handle); - }); - } - - public void handle(RoutingContext event) { - var indexHtml = Paths.get(root.orElseThrow(() -> new IllegalStateException("Web ui root not set but handler called")), "index.html").toString(); - - HttpServerRequest request = event.request(); - String requestedPath = Path.of(event.currentRoute().getPath()).relativize(Path.of(event.normalizedPath())).toString(); - - if ("/".equals(requestedPath)) { - request.response().sendFile(indexHtml); - return; - } - - Path requested = Paths.get(root.get(), requestedPath); - if (!requested.normalize().startsWith(Paths.get(root.get()))) { - request.response().setStatusCode(404).end(); - return; - } - - event.vertx().fileSystem().lprops(requested.toString(), exists -> { - if (exists.succeeded() && exists.result().isRegularFile()) - request.response().sendFile(requested.toString()); - else - request.response().sendFile(indexHtml); - }); - } -} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto deleted file mode 100644 index a1bc1866..00000000 --- a/dhfs-parent/server-old/src/main/proto/dhfs_objects_peer_discovery.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -option java_multiple_files = true; -option java_package =
"com.usatiuk.dhfs.objects.repository.peerdiscovery"; -option java_outer_classname = "DhfsObjectPeerDiscoveryApi"; - -package dhfs.objects.peerdiscovery; - -message PeerDiscoveryInfo { - string uuid = 1; - uint32 port = 2; - uint32 securePort = 3; -} diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto deleted file mode 100644 index 0f93fdd5..00000000 --- a/dhfs-parent/server-old/src/main/proto/dhfs_objects_serial.proto +++ /dev/null @@ -1,155 +0,0 @@ -syntax = "proto3"; - -option java_multiple_files = true; -option java_package = "com.usatiuk.dhfs.objects.persistence"; -option java_outer_classname = "DhfsObjectPersistence"; - -package dhfs.objects.persistence; - -message ObjectMetadataP { - string name = 1; - map remoteCopies = 2; - string knownClass = 3; - bool seen = 4; - bool deleted = 5; - repeated string confirmedDeletes = 6; - repeated string referrers = 7; - map changelog = 8; - repeated string savedRefs = 9; - bool frozen = 10; - bool haveLocalCopy = 11; -} - -message FsNodeP { - string uuid = 1; - int64 mode = 2; - int64 ctime = 3; - int64 mtime = 4; -} - -message FilePChunksEntry { - int64 start = 1; - string id = 2; -} - -message FileP { - FsNodeP fsNode = 1; - repeated FilePChunksEntry chunks = 2; - bool symlink = 3; - int64 size = 4; -} - -message DirectoryP { - FsNodeP fsNode = 1; - map children = 2; -} - -message ChunkDataP { - string name = 1; - bytes data = 2; -} - -message PeerDirectoryP { - repeated string peers = 1; -} - -message PersistentPeerInfoP { - string uuid = 1; - bytes cert = 2; -} - -message JKleppmannTreeNodeMetaFileP { - string name = 1; - string fileIno = 2; -} - -message JKleppmannTreeNodeMetaDirectoryP { - string name = 1; -} - -message JKleppmannTreeNodeMetaP { - oneof meta { - JKleppmannTreeNodeMetaFileP jKleppmannTreeNodeMetaFile = 1; - JKleppmannTreeNodeMetaDirectoryP jKleppmannTreeNodeMetaDirectory = 2; - } -} - -message JKleppmannTreeOpP { - int64 timestamp = 1; - string peer = 2; - string newParentId = 3; - JKleppmannTreeNodeMetaP meta = 4; - string child = 5; -} - -message JKleppmannTreeNodePChildrenEntry { - string key = 1; - string value = 2; -} - -message JKleppmannTreeNodeP { - optional string parent = 1; - string id = 2; - repeated JKleppmannTreeNodePChildrenEntry children = 3; - optional JKleppmannTreeNodeMetaP meta = 4; - optional JKleppmannTreeOpP lastEffectiveOp = 5; -} - -message JKleppmannTreePersistentDataPQueueEntry { - int64 clock = 1; - string uuid = 2; - JKleppmannTreeOpP op = 3; -} - -message JKleppmannTreePersistentDataPQueue { - string node = 1; - repeated JKleppmannTreePersistentDataPQueueEntry entries = 2; -} - -message JKleppmannTreePersistentDataPTimestampEntry { - string host = 1; - int64 timestamp = 2; -} - -message JKleppmannTreeOpLogEffectP { - optional JKleppmannTreeOpP oldEffectiveMove = 1; - optional string oldParent = 2; - optional JKleppmannTreeNodeMetaP oldMeta = 3; - JKleppmannTreeOpP effectiveOp = 4; - string newParentId = 5; - JKleppmannTreeNodeMetaP newMeta = 6; - string selfId = 7; -} - -message JKleppmannTreeOpLogPEntry { - int64 clock = 1; - string uuid = 2; - JKleppmannTreeOpP op = 3; - repeated JKleppmannTreeOpLogEffectP effects = 4; -} - -message JKleppmannTreePersistentDataP { - string treeName = 1; - int64 clock = 2; - repeated JKleppmannTreePersistentDataPQueue queues = 3; - repeated JKleppmannTreePersistentDataPTimestampEntry peerLog = 4; - repeated JKleppmannTreeOpLogPEntry opLog = 5; -} - -message 
PeerDirectoryLocalP { - repeated string initialOpSyncDonePeers = 1; - repeated string initialObjSyncDonePeers = 2; -} - -message JObjectDataP { - oneof obj { - FileP file = 2; - DirectoryP directory = 3; - ChunkDataP chunkData = 5; - PeerDirectoryP peerDirectory = 6; - PersistentPeerInfoP persistentPeerInfo = 7; - JKleppmannTreeNodeP jKleppmannTreeNode = 8; - JKleppmannTreePersistentDataP jKleppmannTreePersistentData = 9; - PeerDirectoryLocalP peerDirectoryLocal = 10; - } -} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto b/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto deleted file mode 100644 index 8ef94946..00000000 --- a/dhfs-parent/server-old/src/main/proto/dhfs_objects_sync.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -import "dhfs_objects_serial.proto"; - -option java_multiple_files = true; -option java_package = "com.usatiuk.dhfs.objects.repository"; -option java_outer_classname = "DhfsObjectSyncApi"; - -package dhfs.objects.sync; - -service DhfsObjectSyncGrpc { - rpc GetObject (GetObjectRequest) returns (GetObjectReply) {} - rpc CanDelete (CanDeleteRequest) returns (CanDeleteReply) {} - rpc IndexUpdate (IndexUpdatePush) returns (IndexUpdateReply) {} - rpc OpPush (OpPushMsg) returns (OpPushReply) {} - - rpc Ping (PingRequest) returns (PingReply) {} -} - -message PingRequest { - string selfUuid = 1; -} - -message PingReply { - string selfUuid = 1; -} - -message ObjectChangelogEntry { - string host = 1; - uint64 version = 2; -} - -message ObjectChangelog { - repeated ObjectChangelogEntry entries = 1; -} - -message ObjectHeader { - string name = 2; - ObjectChangelog changelog = 5; - optional dhfs.objects.persistence.JObjectDataP pushedData = 6; -} - -message ApiObject { - ObjectHeader header = 1; - dhfs.objects.persistence.JObjectDataP content = 2; -} - -message GetObjectRequest { - string selfUuid = 10; - - string name = 2; -} - -message GetObjectReply { - string selfUuid = 10; - - ApiObject object = 1; -} - -message CanDeleteRequest { - string selfUuid = 10; - - string name = 2; - repeated string ourReferrers = 3; -} - -message CanDeleteReply { - string selfUuid = 10; - string objName = 1; - bool deletionCandidate = 2; - repeated string referrers = 3; -} - -message IndexUpdatePush { - string selfUuid = 10; - - ObjectHeader header = 1; -} - -message IndexUpdateReply {} - -message JKleppmannTreePeriodicPushOpP { - string fromUuid = 1; - int64 timestamp = 2; -} - -message OpPushPayload { - oneof payload { - dhfs.objects.persistence.JKleppmannTreeOpP jKleppmannTreeOpWrapper = 1; - JKleppmannTreePeriodicPushOpP jKleppmannTreePeriodicPushOp = 2; - } -} - -message OpPushMsg { - string selfUuid = 10; - string queueId = 1; - repeated OpPushPayload msg = 2; -} - -message OpPushReply { - -} \ No newline at end of file diff --git a/dhfs-parent/server-old/src/main/resources/application.properties b/dhfs-parent/server-old/src/main/resources/application.properties deleted file mode 100644 index 8309619c..00000000 --- a/dhfs-parent/server-old/src/main/resources/application.properties +++ /dev/null @@ -1,46 +0,0 @@ -quarkus.grpc.server.use-separate-server=false -dhfs.objects.persistence.files.root=${HOME}/dhfs_default/data/objs -dhfs.objects.root=${HOME}/dhfs_default/data/stuff -dhfs.objects.peerdiscovery.port=42069 -dhfs.objects.peerdiscovery.interval=5000 -dhfs.objects.sync.timeout=30 -dhfs.objects.sync.ping.timeout=5 -dhfs.objects.invalidation.threads=4 -dhfs.objects.invalidation.delay=1000 
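The ObjectChangelog messages in the sync protocol above (repeated host/version pairs) act as a per-host version vector: a replica can classify an incoming header as newer, older, equal, or concurrent (a conflict) by comparing versions host by host. Below is a sketch of that comparison over plain maps; the map fields in ObjectMetadataP lost their type parameters in extraction, and a host-to-uint64 changelog is assumed here:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

enum Dominance { EQUAL, LEFT_NEWER, RIGHT_NEWER, CONCURRENT }

class ChangelogCompare {
    // Missing hosts count as version 0, so vectors with different
    // key sets still compare cleanly.
    static Dominance compare(Map<String, Long> left, Map<String, Long> right) {
        Set<String> hosts = new HashSet<>(left.keySet());
        hosts.addAll(right.keySet());
        boolean leftAhead = false, rightAhead = false;
        for (String host : hosts) {
            long l = left.getOrDefault(host, 0L);
            long r = right.getOrDefault(host, 0L);
            if (l > r) leftAhead = true;
            if (r > l) rightAhead = true;
        }
        if (leftAhead && rightAhead) return Dominance.CONCURRENT;
        if (leftAhead) return Dominance.LEFT_NEWER;
        if (rightAhead) return Dominance.RIGHT_NEWER;
        return Dominance.EQUAL;
    }
}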
-dhfs.objects.reconnect_interval=5s -dhfs.objects.write_log=false -dhfs.objects.periodic-push-op-interval=5m -dhfs.fuse.root=${HOME}/dhfs_default/fuse -dhfs.fuse.debug=false -dhfs.fuse.enabled=true -dhfs.files.allow_recursive_delete=false -dhfs.files.target_chunk_size=2097152 -# Writes strictly smaller than this will try to merge with blocks nearby -dhfs.files.write_merge_threshold=0.8 -# If a merge would result in a block of greater size than this, stop merging -dhfs.files.write_merge_limit=1.2 -# Don't take blocks of this size and above when merging -dhfs.files.write_merge_max_chunk_to_take=1 -dhfs.files.write_last_chunk_limit=1.5 -dhfs.objects.writeback.delay=100 -dhfs.objects.writeback.limit=134217728 -dhfs.objects.lru.limit=134217728 -dhfs.objects.lru.print-stats=false -dhfs.objects.writeback.watermark-high=0.6 -dhfs.objects.writeback.watermark-low=0.4 -dhfs.objects.writeback.threads=4 -dhfs.objects.deletion.delay=1000 -dhfs.objects.deletion.can-delete-retry-delay=10000 -dhfs.objects.ref_verification=true -dhfs.files.use_hash_for_chunks=false -dhfs.objects.autosync.threads=2 -dhfs.objects.autosync.download-all=false -dhfs.objects.move-processor.threads=4 -dhfs.objects.ref-processor.threads=4 -dhfs.objects.opsender.batch-size=100 -dhfs.objects.lock_timeout_secs=15 -dhfs.local-discovery=true -quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE -quarkus.log.category."com.usatiuk.dhfs".level=TRACE -quarkus.http.insecure-requests=enabled -quarkus.http.ssl.client-auth=required diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java deleted file mode 100644 index 03f74be5..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TempDataProfile.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.usatiuk.dhfs; - -import io.quarkus.test.junit.QuarkusTestProfile; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; - -abstract public class TempDataProfile implements QuarkusTestProfile { - protected void getConfigOverrides(Map toPut) {} - - @Override - final public Map getConfigOverrides() { - Path tempDirWithPrefix; - try { - tempDirWithPrefix = Files.createTempDirectory("dhfs-test"); - } catch (IOException e) { - throw new RuntimeException(e); - } - var ret = new HashMap(); - ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString()); - ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString()); - ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString()); - getConfigOverrides(ret); - return ret; - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java deleted file mode 100644 index 2a6979a6..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/TestDataCleaner.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.usatiuk.dhfs; - -import io.quarkus.logging.Log; -import io.quarkus.runtime.ShutdownEvent; -import io.quarkus.runtime.StartupEvent; -import jakarta.annotation.Priority; -import jakarta.enterprise.context.ApplicationScoped; -import jakarta.enterprise.event.Observes; -import org.eclipse.microprofile.config.inject.ConfigProperty; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.Objects; - -@ApplicationScoped 
-public class TestDataCleaner { - @ConfigProperty(name = "dhfs.objects.persistence.files.root") - String tempDirectory; - @ConfigProperty(name = "dhfs.objects.root") - String tempDirectoryIdx; - - void init(@Observes @Priority(1) StartupEvent event) throws IOException { - try { - purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); - } catch (Exception ignored) { - Log.warn("Couldn't cleanup test data on init"); - } - } - - void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException { - purgeDirectory(Path.of(tempDirectory).toFile()); - purgeDirectory(Path.of(tempDirectoryIdx).toFile()); - } - - void purgeDirectory(File dir) { - for (File file : Objects.requireNonNull(dir.listFiles())) { - if (file.isDirectory()) - purgeDirectory(file); - file.delete(); - } - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java deleted file mode 100644 index 86ad0fb3..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/Benchmarker.java +++ /dev/null @@ -1,83 +0,0 @@ -package com.usatiuk.dhfs.benchmarks; - -import io.quarkus.logging.Log; -import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; - -import java.util.Arrays; -import java.util.function.Supplier; - -public class Benchmarker { - static long[] runLatency(Supplier fn, int iterations) { - var out = new long[iterations]; - - int hash = 1; - - for (int i = 0; i < iterations; i++) { - long startNanos = System.nanoTime(); - var cur = fn.get(); - long stopNanos = System.nanoTime(); - out[i] = stopNanos - startNanos; - hash = hash * 31 + cur.hashCode(); - } - - System.out.println("\nHash: " + hash); - - return out; - } - - static long[] runThroughput(Supplier fn, int iterations, long iterationTime) { - var out = new long[iterations]; - - int hash = 1; - - for (int i = 0; i < iterations; i++) { - long startMillis = System.currentTimeMillis(); - long count = 0; - // FIXME: That's probably janky - while (System.currentTimeMillis() - startMillis < iterationTime) { - var res = fn.get(); - count++; - hash = hash * 31 + res.hashCode(); - } - System.out.println("Ran iteration " + i + "/" + iterations + " count=" + count); - out[i] = count; - } - - System.out.println("\nHash: " + hash); - - return out; - } - - static void printStats(double[] data, String unit) { - DescriptiveStatistics stats = new DescriptiveStatistics(); - for (var r : data) { - stats.addValue(r); - } - Log.info("\n" + stats + - "\n 50%: " + stats.getPercentile(50) + " " + unit + - "\n 90%: " + stats.getPercentile(90) + " " + unit + - "\n 95%: " + stats.getPercentile(95) + " " + unit + - "\n 99%: " + stats.getPercentile(99) + " " + unit + - "\n 99.9%: " + stats.getPercentile(99.9) + " " + unit + - "\n 99.99%: " + stats.getPercentile(99.99) + " " + unit - ); - - } - - static void runAndPrintMixSimple(String name, Supplier fn, int latencyIterations, int thrptIterations, int thrptIterationTime, int warmupIterations, int warmupIterationTime) { - System.out.println("\n=========\n" + "Running " + name + "\n=========\n"); - System.out.println("==Warmup=="); - runThroughput(fn, warmupIterations, warmupIterationTime); - System.out.println("==Warmup done=="); - System.out.println("==Throughput=="); - var thrpt = runThroughput(fn, thrptIterations, thrptIterationTime); - printStats(Arrays.stream(thrpt).mapToDouble(o -> (double) o / 1000).toArray(), "ops/s"); - 
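// Note: the ops/s conversion above divides each iteration's count by a flat
// 1000, which is only a per-second figure when thrptIterationTime is 1000 ms;
// writeMbTest below passes 100 ms iterations, so a conversion that holds for
// any iteration length would be (illustrative, same printStats call):
//   printStats(Arrays.stream(thrpt).mapToDouble(o -> o * 1000.0 / thrptIterationTime).toArray(), "ops/s");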
System.out.println("==Throughput done=="); - System.out.println("==Latency=="); - var lat = runLatency(fn, latencyIterations); - printStats(Arrays.stream(lat).mapToDouble(o -> (double) o).toArray(), "ns/op"); - System.out.println("==Latency done=="); - System.out.println("\n=========\n" + name + " done" + "\n=========\n"); - } - -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java deleted file mode 100644 index 96acf3f5..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/benchmarks/DhfsFileBenchmarkTest.java +++ /dev/null @@ -1,52 +0,0 @@ -package com.usatiuk.dhfs.benchmarks; - -import com.google.protobuf.UnsafeByteOperations; -import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.files.service.DhfsFileService; -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; -import jakarta.inject.Inject; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -import java.nio.ByteBuffer; -import java.util.Map; - -class Profiles { - public static class DhfsFuseTestProfile extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "INFO"); - ret.put("dhfs.fuse.enabled", "false"); - ret.put("dhfs.objects.ref_verification", "false"); - } - } -} - -@QuarkusTest -@TestProfile(Profiles.DhfsFuseTestProfile.class) -public class DhfsFileBenchmarkTest { - @Inject - DhfsFileService dhfsFileService; - - @Test - @Disabled - void openRootTest() { - Benchmarker.runAndPrintMixSimple("dhfsFileService.open(\"\")", - () -> { - return dhfsFileService.open(""); - }, 1_000_000, 5, 1000, 5, 1000); - } - - @Test - @Disabled - void writeMbTest() { - String file = dhfsFileService.create("/writeMbTest", 0777).get(); - var bb = ByteBuffer.allocateDirect(1024 * 1024); - Benchmarker.runAndPrintMixSimple("dhfsFileService.write(\"\")", - () -> { - var thing = UnsafeByteOperations.unsafeWrap(bb); - return dhfsFileService.write(file, dhfsFileService.size(file), thing); - }, 1_000, 10, 100, 1, 100); - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java deleted file mode 100644 index 93cc42b8..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTest.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.usatiuk.dhfs.files; - -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; - -@QuarkusTest -@TestProfile(Profiles.DhfsFileServiceSimpleTestProfile.class) -public class DhfsFileServiceSimpleTest extends DhfsFileServiceSimpleTestImpl { -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java deleted file mode 100644 index 8bea5c7e..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestImpl.java +++ /dev/null @@ -1,288 +0,0 @@ -package com.usatiuk.dhfs.files; - -import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.files.objects.ChunkData; -import com.usatiuk.dhfs.files.objects.File; -import com.usatiuk.dhfs.files.service.DhfsFileService; -import 
com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException; -import com.usatiuk.dhfs.objects.jrepository.JObjectManager; -import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager; -import com.usatiuk.kleppmanntree.AlreadyExistsException; -import jakarta.inject.Inject; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import static org.awaitility.Awaitility.await; - -class Profiles { - public static class DhfsFileServiceSimpleTestProfile extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("dhfs.fuse.enabled", "false"); - } - } - - public static class DhfsFileServiceSimpleTestProfileNoChunking extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("dhfs.fuse.enabled", "false"); - ret.put("dhfs.files.target_chunk_size", "-1"); - } - } - - public static class DhfsFileServiceSimpleTestProfileSmallChunking extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("dhfs.fuse.enabled", "false"); - ret.put("dhfs.files.target_chunk_size", "3"); - } - } -} - -public class DhfsFileServiceSimpleTestImpl { - @Inject - DhfsFileService fileService; - @Inject - JObjectManager jObjectManager; - @Inject - JObjectTxManager jObjectTxManager; - - @Test - void readTest() { - var fuuid = UUID.randomUUID(); - { - ChunkData c1 = new ChunkData(ByteString.copyFrom("12345".getBytes())); - ChunkData c2 = new ChunkData(ByteString.copyFrom("678".getBytes())); - ChunkData c3 = new ChunkData(ByteString.copyFrom("91011".getBytes())); - File f = new File(fuuid, 777, false); - f.getChunks().put(0L, c1.getName()); - f.getChunks().put((long) c1.getBytes().size(), c2.getName()); - f.getChunks().put((long) c1.getBytes().size() + c2.getBytes().size(), c3.getName()); - - // FIXME: dhfs_files - - var c1o = new AtomicReference(); - var c2o = new AtomicReference(); - var c3o = new AtomicReference(); - var fo = new AtomicReference(); - - jObjectTxManager.executeTx(() -> { - c1o.set(jObjectManager.put(c1, Optional.of(f.getName())).getMeta().getName()); - c2o.set(jObjectManager.put(c2, Optional.of(f.getName())).getMeta().getName()); - c3o.set(jObjectManager.put(c3, Optional.of(f.getName())).getMeta().getName()); - fo.set(jObjectManager.put(f, Optional.empty()).getMeta().getName()); - }); - - var all = jObjectManager.findAll(); - Assertions.assertTrue(all.contains(c1o.get())); - Assertions.assertTrue(all.contains(c2o.get())); - Assertions.assertTrue(all.contains(c3o.get())); - Assertions.assertTrue(all.contains(fo.get())); - } - - String all = "1234567891011"; - - { - for (int start = 0; start < all.length(); start++) { - for (int end = start; end <= all.length(); end++) { - var read = fileService.read(fuuid.toString(), start, end - start); - Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray()); - } - } - } - } - - @Test - void dontMkdirTwiceTest() { - Assertions.assertDoesNotThrow(() -> fileService.mkdir("/dontMkdirTwiceTest", 777)); - Assertions.assertThrows(AlreadyExistsException.class, () -> fileService.mkdir("/dontMkdirTwiceTest", 777)); - } - - @Test - void writeTest() { - var ret = fileService.create("/writeTest", 777); - Assertions.assertTrue(ret.isPresent()); - - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - 
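// (Context from readTest above: a file's chunks are keyed by their starting
// offset, 0 / 5 / 8 for "12345" / "678" / "91011", so the chunk covering any
// byte offset is a floor lookup in that map; with a NavigableMap<Long, String>,
// chunks.floorEntry(6) would yield the 5 -> "678" entry. The actual backing
// map type of File.getChunks() is not shown in this diff.)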
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.write(uuid, 4, new byte[]{10, 11, 12}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.write(uuid, 10, new byte[]{13, 14}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); - fileService.write(uuid, 6, new byte[]{15, 16}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); - fileService.write(uuid, 3, new byte[]{17, 18}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray()); - } - - @Test - void removeTest() { - var ret = fileService.create("/removeTest", 777); - Assertions.assertTrue(ret.isPresent()); - - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - fileService.unlink("/removeTest"); - Assertions.assertFalse(fileService.open("/removeTest").isPresent()); - } - - @Test - void truncateTest1() { - var ret = fileService.create("/truncateTest1", 777); - Assertions.assertTrue(ret.isPresent()); - - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - fileService.truncate(uuid, 20); - fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray()); - } - - @Test - void truncateTest2() { - var ret = fileService.create("/truncateTest2", 777); - Assertions.assertTrue(ret.isPresent()); - - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - fileService.truncate(uuid, 20); - fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray()); - } - - @Test - void truncateTest3() { - var ret = fileService.create("/truncateTest3", 777); - Assertions.assertTrue(ret.isPresent()); - - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - fileService.truncate(uuid, 7); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray()); - } - - @Test - void moveTest() { - var ret = fileService.create("/moveTest", 777); - Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest")); - 
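// (rename presumably maps to a single replicated tree move, cf. the
// JKleppmannTreeOpP message in the serial proto above, rather than a copy
// plus delete, which is why the old path and the new path flip together.)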
Assertions.assertFalse(fileService.open("/moveTest").isPresent()); - Assertions.assertTrue(fileService.open("/movedTest").isPresent()); - - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray()); - } - - @Test - void moveOverTest() throws InterruptedException { - var ret = fileService.create("/moveOverTest1", 777); - Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); - var ret2 = fileService.create("/moveOverTest2", 777); - Assertions.assertTrue(ret2.isPresent()); - var uuid2 = ret2.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}); - Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray()); - - var oldfile = jObjectManager.get(ret2.get()).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); - - Assertions.assertTrue(fileService.rename("/moveOverTest1", "/moveOverTest2")); - Assertions.assertFalse(fileService.open("/moveOverTest1").isPresent()); - Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent()); - - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray()); - - await().atMost(5, TimeUnit.SECONDS).until(() -> { - try { - return chunkObj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, - (m, d) -> !m.getReferrers().contains(uuid)); - } catch (DeletedObjectAccessException ignored) { - return true; - } - }); - } - - @Test - void readOverSizeTest() { - var ret = fileService.create("/readOverSizeTest", 777); - Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray()); - } - - @Test - void writeOverSizeTest() { - var ret = fileService.create("/writeOverSizeTest", 777); - Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); - Assertions.assertArrayEquals(new byte[]{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 - }, fileService.read(uuid, 0, 30).get().toByteArray()); - } - - @Test - void moveTest2() throws InterruptedException { - var ret = fileService.create("/moveTest2", 777); - Assertions.assertTrue(ret.isPresent()); - var uuid = ret.get(); - - fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray()); - - var oldfile = 
jObjectManager.get(uuid).orElseThrow(IllegalStateException::new); - var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0); - var chunkObj = jObjectManager.get(chunk).orElseThrow(IllegalStateException::new); - - chunkObj.runReadLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> { - Assertions.assertTrue(m.getReferrers().contains(uuid)); - }); - - Assertions.assertTrue(fileService.rename("/moveTest2", "/movedTest2")); - Assertions.assertFalse(fileService.open("/moveTest2").isPresent()); - Assertions.assertTrue(fileService.open("/movedTest2").isPresent()); - - Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray()); - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java deleted file mode 100644 index 5aab68e4..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestNoChunkingTest.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.usatiuk.dhfs.files; - -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; - -@QuarkusTest -@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileNoChunking.class) -public class DhfsFileServiceSimpleTestNoChunkingTest extends DhfsFileServiceSimpleTestImpl { -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java deleted file mode 100644 index 2d9fdd78..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/files/DhfsFileServiceSimpleTestSmallChunkingTest.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.usatiuk.dhfs.files; - -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; - -@QuarkusTest -@TestProfile(Profiles.DhfsFileServiceSimpleTestProfileSmallChunking.class) -public class DhfsFileServiceSimpleTestSmallChunkingTest extends DhfsFileServiceSimpleTestImpl { -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java deleted file mode 100644 index df800321..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/fuse/DhfsFuseTest.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.usatiuk.dhfs.fuse; - -import com.usatiuk.dhfs.TempDataProfile; -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; -import org.eclipse.microprofile.config.inject.ConfigProperty; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; - -class Profiles { - public static class DhfsFuseTestProfile extends TempDataProfile { - } -} - -@QuarkusTest -@TestProfile(Profiles.DhfsFuseTestProfile.class) -public class DhfsFuseTest { - @ConfigProperty(name = "dhfs.fuse.root") - String root; - - @Test - void readWriteFileTest() throws IOException, InterruptedException { - byte[] testString = "test file thing".getBytes(); - Path testPath = Path.of(root).resolve("test1"); - - Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); - Assertions.assertDoesNotThrow(() -> 
Files.write(testPath, testString)); - Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); - Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); - } - - @Test - void symlinkTest() throws IOException, InterruptedException { - byte[] testString = "symlinkedfile".getBytes(); - Path testPath = Path.of(root).resolve("symlinktarget"); - Path testSymlink = Path.of(root).resolve("symlinktest"); - - Assertions.assertDoesNotThrow(() -> Files.createFile(testPath)); - Assertions.assertDoesNotThrow(() -> Files.write(testPath, testString)); - Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testPath)); - Assertions.assertArrayEquals(Files.readAllBytes(testPath), testString); - - Assertions.assertDoesNotThrow(() -> Files.createSymbolicLink(testSymlink, testPath)); - Assertions.assertTrue(() -> Files.isSymbolicLink(testSymlink)); - Assertions.assertEquals(testPath, Files.readSymbolicLink(testSymlink)); - Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testSymlink)); - Assertions.assertArrayEquals(Files.readAllBytes(testSymlink), testString); - } - - @Test - void dontRemoveEmptyDirTest() throws IOException { - byte[] testString = "dontRemoveEmptyDirTestStr".getBytes(); - Path testDir = Path.of(root).resolve("dontRemoveEmptyDirTestDir"); - Path testFile = testDir.resolve("dontRemoveEmptyDirTestFile"); - - Assertions.assertDoesNotThrow(() -> Files.createDirectory(testDir)); - Assertions.assertDoesNotThrow(() -> Files.createFile(testFile)); - Assertions.assertDoesNotThrow(() -> Files.write(testFile, testString)); - Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); - Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); - - Assertions.assertThrows(Exception.class, () -> Files.delete(testDir)); - Assertions.assertDoesNotThrow(() -> Files.readAllBytes(testFile)); - Assertions.assertArrayEquals(Files.readAllBytes(testFile), testString); - - Assertions.assertDoesNotThrow(() -> Files.delete(testFile)); - Assertions.assertDoesNotThrow(() -> Files.delete(testDir)); - Assertions.assertFalse(Files.exists(testDir)); - Assertions.assertFalse(Files.exists(testFile)); - Assertions.assertThrows(Exception.class, () -> Files.readAllBytes(testFile)); - } - -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java deleted file mode 100644 index b9d9f92d..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFuseIT.java +++ /dev/null @@ -1,352 +0,0 @@ -package com.usatiuk.dhfs.integration; - -import com.github.dockerjava.api.model.Device; -import io.quarkus.logging.Log; -import org.apache.commons.lang3.tuple.Pair; -import org.junit.jupiter.api.*; -import org.slf4j.LoggerFactory; -import org.testcontainers.DockerClientFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.output.WaitingConsumer; -import org.testcontainers.containers.wait.strategy.Wait; - -import java.io.IOException; -import java.time.Duration; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; - -import static org.awaitility.Awaitility.await; - -public class DhfsFuseIT { - GenericContainer container1; - GenericContainer container2; - - WaitingConsumer 
waitingConsumer1; - WaitingConsumer waitingConsumer2; - - String c1uuid; - String c2uuid; - - @BeforeEach - void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { - Network network = Network.newNetwork(); - container1 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - container2 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - - Stream.of(container1, container2).parallel().forEach(GenericContainer::start); - - waitingConsumer1 = new WaitingConsumer(); - var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); - container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); - waitingConsumer2 = new WaitingConsumer(); - var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); - container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); - - c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - - Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); - Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - - var c1curl = container1.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c2curl = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - } - - @AfterEach - void stop() { - Stream.of(container1, container2).parallel().forEach(GenericContainer::stop); - } - - @Test - void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - @Test - void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", 
"echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - @Test - void createDelayedTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode()); - - client.unpauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); - } - - @Test - void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); - - client.unpauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> - "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> - 
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - // TODO: How this fits with the tree? - @Test - @Disabled - void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1); - - client.unpauseContainerCmd(container2.getContainerId()).exec(); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); - } - - @Test - void deleteTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> - "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - Log.info("Deleting"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); - Log.info("Deleted"); - - // FIXME? 
- waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3); - - await().atMost(45, TimeUnit.SECONDS).until(() -> - 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> - 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode()); - } - - @Test - void moveFileTest() throws IOException, InterruptedException, TimeoutException { - Log.info("Creating"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - Log.info("Listing"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); - Log.info("Moving"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode()); - Log.info("Listing"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); - Log.info("Reading"); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout())); - } - - @Test - void moveDirTest() throws IOException, InterruptedException, TimeoutException { - Log.info("Creating"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout())); - Log.info("Listing"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); - Log.info("Moving"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode()); - Log.info("Listing"); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode()); - Log.info("Reading"); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout())); - } - - - // TODO: This probably shouldn't be working right now - @Test - void removeAddHostTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == 
container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - var c2curl = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request DELETE " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode()); - - Log.info("Re-adding"); - container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - - await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - Log.info("Listing removeAddHostTest"); - var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); - var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); - var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); - var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/"); - Log.info(cat1); - Log.info(cat2); - Log.info(ls1); - Log.info(ls2); - - return cat1.getStdout().contains("jioadsd") && cat1.getStdout().contains("asvdkljm") && cat1.getStdout().contains("dfgvh") && cat1.getStdout().contains("dscfg") - && cat2.getStdout().contains("jioadsd") && cat2.getStdout().contains("asvdkljm") && cat2.getStdout().contains("dfgvh") && cat2.getStdout().contains("dscfg"); - }); - } - - @Test - void dirConflictTest() throws IOException, InterruptedException, 
TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); - boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"), - Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> { - try { - return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }).anyMatch(r -> r != 0); - Assumptions.assumeTrue(!createFail, "Failed creating one or more files"); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); - var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); - Log.info(ls); - Log.info(cat); - return cat.getStdout().contains("test1") && cat.getStdout().contains("test2"); - }); - } - - @Test - void dirCycleTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;"); - return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou"); - }); - - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container1.getContainerId()).exec(); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/a /root/dhfs_default/fuse/b").getExitCode()); - client.pauseContainerCmd(container2.getContainerId()).exec(); - client.unpauseContainerCmd(container1.getContainerId()).exec(); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/b /root/dhfs_default/fuse/a").getExitCode()); - client.unpauseContainerCmd(container2.getContainerId()).exec(); - - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - Log.info("Listing dirCycleTest"); - Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); - Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); - Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); - Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse")); - 
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a")); - Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b")); - - var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); - Log.info(c1ls2); - var c2ls2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;"); - Log.info(c2ls2); - - return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou") - && c2ls2.getStdout().contains("xqr489") && c2ls2.getStdout().contains("ahinou") - && c1ls2.getExitCode() == 0 && c2ls2.getExitCode() == 0; - }); - - } - -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java deleted file mode 100644 index b401b053..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsFusex3IT.java +++ /dev/null @@ -1,293 +0,0 @@ -package com.usatiuk.dhfs.integration; - -import com.github.dockerjava.api.model.Device; -import io.quarkus.logging.Log; -import org.junit.jupiter.api.*; -import org.slf4j.LoggerFactory; -import org.testcontainers.DockerClientFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.output.WaitingConsumer; -import org.testcontainers.containers.wait.strategy.Wait; - -import java.io.IOException; -import java.time.Duration; -import java.util.List; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; - -import static org.awaitility.Awaitility.await; - -public class DhfsFusex3IT { - GenericContainer<?> container1; - GenericContainer<?> container2; - GenericContainer<?> container3; - - WaitingConsumer waitingConsumer1; - WaitingConsumer waitingConsumer2; - WaitingConsumer waitingConsumer3; - - String c1uuid; - String c2uuid; - String c3uuid; - - // This calculation is somewhat racy, so keep it hardcoded for now - long emptyFileCount = 9; - - @BeforeEach - void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { - // TODO: Dedup - Network network = Network.newNetwork(); - - container1 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - container2 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - container3 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - - - Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start);
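- // Each node persists a self UUID under its data dir; the setup below reads those UUIDs and registers the peers with each other via the known-peers REST endpoint.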
- - c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - - Log.info(container1.getContainerId() + "=" + c1uuid); - Log.info(container2.getContainerId() + "=" + c2uuid); - Log.info(container3.getContainerId() + "=" + c3uuid); - - waitingConsumer1 = new WaitingConsumer(); - var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) - .withPrefix(c1uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); - container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); - waitingConsumer2 = new WaitingConsumer(); - var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) - .withPrefix(c2uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); - container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); - waitingConsumer3 = new WaitingConsumer(); - var loggingConsumer3 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class)) - .withPrefix(c3uuid.substring(0, 4) + "-" + testInfo.getDisplayName()); - container3.followOutput(loggingConsumer3.andThen(waitingConsumer3)); - - Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); - Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid)); - - waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS, 2); - - var c1curl = container1.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c2curl1 = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c2curl3 = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c3uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c3curl = container3.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - } - - private boolean checkEmpty() throws IOException, InterruptedException { - for (var container : List.of(container1, container2, container3)) { - var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f"); - var foundWc = container.execInContainer("/bin/sh", "-c", "find 
/root/dhfs_default/data/objs -type f | wc -l"); - Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString()); - if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount)) - return false; - } - return true; - } - - @AfterEach - void stop() { - Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop); - } - - @Test - void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - @Test - void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); - } - - @Test - void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty()); - } - - @Test - void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode()); - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); - Log.info(helloOut); - return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); - }); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); - Log.info(helloOut); - return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); - }); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var helloOut = 
container3.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out"); - Log.info(helloOut); - return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world"); - }); - } - - @Test - void removeHostTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - - var c3curl = container3.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request DELETE " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - @Test - void dirConflictTest() throws IOException, InterruptedException, TimeoutException { - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container1.getContainerId()).exec(); - client.pauseContainerCmd(container2.getContainerId()).exec(); - // Pauses needed as otherwise docker buffers some incoming packets - waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.pauseContainerCmd(container3.getContainerId()).exec(); - client.unpauseContainerCmd(container2.getContainerId()).exec(); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.pauseContainerCmd(container2.getContainerId()).exec(); - client.unpauseContainerCmd(container1.getContainerId()).exec(); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.unpauseContainerCmd(container2.getContainerId()).exec(); - client.unpauseContainerCmd(container3.getContainerId()).exec(); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - waitingConsumer3.waitUntil(frame -> 
frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - for (var c : List.of(container1, container2, container3)) { - var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); - var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); - Log.info(ls); - Log.info(cat); - if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) - return false; - } - return true; - }); - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && - container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && - container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); - }); - } - - @Test - void fileConflictTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode()); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout())); - - var client = DockerClientFactory.instance().client(); - client.pauseContainerCmd(container1.getContainerId()).exec(); - client.pauseContainerCmd(container2.getContainerId()).exec(); - // Pauses needed as otherwise docker buffers some incoming packets - waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.pauseContainerCmd(container3.getContainerId()).exec(); - client.unpauseContainerCmd(container2.getContainerId()).exec(); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.pauseContainerCmd(container2.getContainerId()).exec(); - client.unpauseContainerCmd(container1.getContainerId()).exec(); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2); - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode()); - client.unpauseContainerCmd(container2.getContainerId()).exec(); - client.unpauseContainerCmd(container3.getContainerId()).exec(); - Log.warn("Waiting for connections"); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - 
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2); - Log.warn("Connected"); - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - for (var c : List.of(container1, container2, container3)) { - var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"); - var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*"); - Log.info(ls); - Log.info(cat); - if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3"))) - return false; - } - return true; - }); - - await().atMost(45, TimeUnit.SECONDS).until(() -> { - return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && - container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) && - container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals( - container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout()); - }); - } - -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java deleted file mode 100644 index 5bec10e9..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/DhfsImage.java +++ /dev/null @@ -1,93 +0,0 @@ -package com.usatiuk.dhfs.integration; - -import io.quarkus.logging.Log; -import org.jetbrains.annotations.NotNull; -import org.testcontainers.images.builder.ImageFromDockerfile; - -import java.nio.file.Paths; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -public class DhfsImage implements Future<String> { - - private static String _builtImage = null; - private static DhfsImage INSTANCE = new DhfsImage(); - - private DhfsImage() {} - - public static DhfsImage getInstance() { - return INSTANCE; - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - return false; - } - - @Override - public boolean isCancelled() { - return false; - } - - @Override - public boolean isDone() { - return true; - } - - @Override - public String get() throws InterruptedException, ExecutionException { - return buildImpl(); - } - - @Override - public String get(long timeout, @NotNull TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return buildImpl(); - } - - private synchronized String buildImpl() { - if (_builtImage != null) { - return _builtImage; - } - - Log.info("Building image"); - - String buildPath = System.getProperty("buildDirectory"); - String nativeLibsDirectory = System.getProperty("nativeLibsDirectory"); - Log.info("Build path: " + buildPath); - Log.info("Native libs path: " + nativeLibsDirectory); - - var image = new ImageFromDockerfile() - .withDockerfileFromBuilder(builder -> - builder - .from("azul/zulu-openjdk-debian:21-jre-headless-latest") - .run("apt update && apt install -y libfuse2 curl gcc") - .copy("/app", "/app") - .copy("/libs", "/libs") - .cmd("java", "-ea", "-Xmx128M", - "--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED", - "--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED", - "-Ddhfs.objects.peerdiscovery.interval=100", - 
"-Ddhfs.objects.invalidation.delay=100", - "-Ddhfs.objects.deletion.delay=0", - "-Ddhfs.objects.deletion.can-delete-retry-delay=1000", - "-Ddhfs.objects.ref_verification=true", - "-Ddhfs.objects.write_log=true", - "-Ddhfs.objects.sync.timeout=10", - "-Ddhfs.objects.sync.ping.timeout=5", - "-Ddhfs.objects.reconnect_interval=1s", - "-Dcom.usatiuk.dhfs.supportlib.native-path=/libs", - "-Dquarkus.log.category.\"com.usatiuk\".level=TRACE", - "-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE", - "-Ddhfs.objects.periodic-push-op-interval=5s", - "-jar", "/app/quarkus-run.jar") - .build()) - .withFileFromPath("/app", Paths.get(buildPath, "quarkus-app")) - .withFileFromPath("/libs", Paths.get(nativeLibsDirectory)); - - _builtImage = image.get(); - Log.info("Image built: " + _builtImage); - return _builtImage; - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java deleted file mode 100644 index 07a929e4..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/integration/ResyncIT.java +++ /dev/null @@ -1,135 +0,0 @@ -package com.usatiuk.dhfs.integration; - -import com.github.dockerjava.api.model.Device; -import org.junit.jupiter.api.*; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.output.WaitingConsumer; -import org.testcontainers.containers.wait.strategy.Wait; - -import java.io.IOException; -import java.time.Duration; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; - -import static org.awaitility.Awaitility.await; - -public class ResyncIT { - GenericContainer container1; - GenericContainer container2; - - WaitingConsumer waitingConsumer1; - WaitingConsumer waitingConsumer2; - - String c1uuid; - String c2uuid; - - @BeforeEach - void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException { - Network network = Network.newNetwork(); - - container1 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - container2 = new GenericContainer<>(DhfsImage.getInstance()) - .withPrivilegedMode(true) - .withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse"))) - .waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network); - - Stream.of(container1, container2).parallel().forEach(GenericContainer::start); - - waitingConsumer1 = new WaitingConsumer(); - var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("1-" + testInfo.getDisplayName()); - container1.followOutput(loggingConsumer1.andThen(waitingConsumer1)); - waitingConsumer2 = new WaitingConsumer(); - var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName()); - container2.followOutput(loggingConsumer2.andThen(waitingConsumer2)); - } - - @AfterEach - void stop() { - Stream.of(container1, 
container2).parallel().forEach(GenericContainer::stop); - } - - @Test - void readWriteFileTest() throws IOException, InterruptedException, TimeoutException { - await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode()); - c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - - Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); - Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - - var c1curl = container1.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c2curl = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout())); - } - - - @Test - void manyFiles() throws IOException, InterruptedException, TimeoutException { - var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done"); - Assertions.assertEquals(0, ret.getExitCode()); - var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); - Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); - - ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done"); - Assertions.assertEquals(0, ret.getExitCode()); - foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); - Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip())); - - c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout(); - - Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid)); - Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid)); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS); - - var c1curl = container1.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c2uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - var c2curl = container2.execInContainer("/bin/sh", "-c", - "curl --header \"Content-Type: 
application/json\" " + - " --request PUT " + - " --data '{\"uuid\":\"" + c1uuid + "\"}' " + - " http://localhost:8080/objects-manage/known-peers"); - - waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); - return 400 == Integer.valueOf(foundWc2.getStdout().strip()); - }); - await().atMost(45, TimeUnit.SECONDS).until(() -> { - var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l"); - return 400 == Integer.valueOf(foundWc2.getStdout().strip()); - }); - } - -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java deleted file mode 100644 index 16e78c86..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/FileObjectPersistentStoreTest.java +++ /dev/null @@ -1,95 +0,0 @@ -package com.usatiuk.dhfs.persistence; - -import com.google.protobuf.ByteString; -import com.usatiuk.dhfs.TempDataProfile; -import com.usatiuk.dhfs.objects.persistence.ChunkDataP; -import com.usatiuk.dhfs.objects.persistence.JObjectDataP; -import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP; -import com.usatiuk.dhfs.objects.repository.persistence.FileObjectPersistentStore; -import io.quarkus.test.junit.QuarkusTest; -import io.quarkus.test.junit.TestProfile; -import jakarta.inject.Inject; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.Map; -import java.util.concurrent.ThreadLocalRandom; - - -class Profiles { - public static class FileObjectPersistentStoreTestProfile extends TempDataProfile { - @Override - protected void getConfigOverrides(Map ret) { - ret.put("quarkus.log.category.\"com.usatiuk.dhfs\".level", "TRACE"); - ret.put("dhfs.fuse.enabled", "false"); - ret.put("dhfs.objects.ref_verification", "true"); - } - } -} - -@QuarkusTest -@TestProfile(Profiles.FileObjectPersistentStoreTestProfile.class) -public class FileObjectPersistentStoreTest { - @Inject - FileObjectPersistentStore fileObjectPersistentStore; - - @Test - public void writeReadFullObject() { - String name = "writeReadFullObjectSmallMeta"; - - var bytes = new byte[100000]; - ThreadLocalRandom.current().nextBytes(bytes); - - ObjectMetadataP meta = ObjectMetadataP.newBuilder().setName("verycoolname123456789").build(); - JObjectDataP data = JObjectDataP.newBuilder().setChunkData(ChunkDataP.newBuilder().setData(ByteString.copyFrom(bytes)).build()).build(); - - fileObjectPersistentStore.writeObjectDirect(name, meta, data); - var readMeta = fileObjectPersistentStore.readObjectMeta(name); - var readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertEquals(data, readData); - - var bigString = RandomStringUtils.random(100000); - - var newMeta = ObjectMetadataP.newBuilder().setName(String.valueOf(bigString)).build(); - fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, readMeta); - 
Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectMetaDirect(name, meta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, null); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertThrows(Throwable.class, () -> fileObjectPersistentStore.readObject(name)); - - fileObjectPersistentStore.writeObjectDirect(name, newMeta, data); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectMetaDirect(name, meta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(meta, readMeta); - Assertions.assertEquals(data, readData); - - fileObjectPersistentStore.writeObjectMetaDirect(name, newMeta); - readMeta = fileObjectPersistentStore.readObjectMeta(name); - readData = fileObjectPersistentStore.readObject(name); - Assertions.assertEquals(newMeta, readMeta); - Assertions.assertEquals(data, readData); - } -} diff --git a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java b/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java deleted file mode 100644 index fd6f10e7..00000000 --- a/dhfs-parent/server-old/src/test/java/com/usatiuk/dhfs/persistence/ProtoSerializationTest.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.usatiuk.dhfs.persistence; - -import io.quarkus.test.junit.QuarkusTest; - -@QuarkusTest -public class ProtoSerializationTest { - -// @Inject -// ProtoSerializerService protoSerializerService; -// -// @Test -// void SerializeDeserializePeerDirectory() { -// var pd = new PeerDirectory(); -// pd.getPeers().add(UUID.randomUUID()); -// var ser = JObjectDataP.newBuilder().setPeerDirectory((PeerDirectoryP) protoSerializerService.serialize(pd)).build(); -// var deser = (PeerDirectory) protoSerializerService.deserialize(ser); -// Assertions.assertIterableEquals(pd.getPeers(), deser.getPeers()); -// -// var ser2 = protoSerializerService.serializeToJObjectDataP(pd); -// var deser2 = (PeerDirectory) protoSerializerService.deserialize(ser2); -// Assertions.assertIterableEquals(pd.getPeers(), deser2.getPeers()); -// } -// -} diff --git a/dhfs-parent/server-old/src/test/resources/application.properties b/dhfs-parent/server-old/src/test/resources/application.properties deleted file mode 100644 index 64f51835..00000000 --- a/dhfs-parent/server-old/src/test/resources/application.properties +++ /dev/null @@ -1,11 +0,0 @@ -dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test -dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test -dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test -dhfs.objects.ref_verification=true -dhfs.objects.deletion.delay=0 -quarkus.log.category."com.usatiuk.dhfs".level=TRACE -quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE 
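-# Overrides below: load supportlib parent-first, bind the test HTTP/HTTPS ports to random free ports (0), and disable local peer discovery (presumably so integration tests connect peers explicitly).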
-quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib -quarkus.http.test-port=0 -quarkus.http.test-ssl-port=0 -dhfs.local-discovery=false \ No newline at end of file