diff --git a/dhfs-parent/server-old/.dockerignore b/dhfs-parent/server-old/.dockerignore
deleted file mode 100644
index 94810d00..00000000
--- a/dhfs-parent/server-old/.dockerignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*
-!target/*-runner
-!target/*-runner.jar
-!target/lib/*
-!target/quarkus-app/*
\ No newline at end of file
diff --git a/dhfs-parent/server-old/.gitignore b/dhfs-parent/server-old/.gitignore
deleted file mode 100644
index 8c7863e7..00000000
--- a/dhfs-parent/server-old/.gitignore
+++ /dev/null
@@ -1,43 +0,0 @@
-#Maven
-target/
-pom.xml.tag
-pom.xml.releaseBackup
-pom.xml.versionsBackup
-release.properties
-.flattened-pom.xml
-
-# Eclipse
-.project
-.classpath
-.settings/
-bin/
-
-# IntelliJ
-.idea
-*.ipr
-*.iml
-*.iws
-
-# NetBeans
-nb-configuration.xml
-
-# Visual Studio Code
-.vscode
-.factorypath
-
-# OSX
-.DS_Store
-
-# Vim
-*.swp
-*.swo
-
-# patch
-*.orig
-*.rej
-
-# Local environment
-.env
-
-# Plugin directory
-/.quarkus/cli/plugins/
diff --git a/dhfs-parent/server-old/Dockerfile b/dhfs-parent/server-old/Dockerfile
deleted file mode 100644
index 62bace54..00000000
--- a/dhfs-parent/server-old/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM azul/zulu-openjdk-debian:21-jre-latest
-RUN apt update && apt install -y libfuse2 curl
\ No newline at end of file
diff --git a/dhfs-parent/server-old/docker-compose.yml b/dhfs-parent/server-old/docker-compose.yml
deleted file mode 100644
index a6a0aefa..00000000
--- a/dhfs-parent/server-old/docker-compose.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-version: "3.2"
-
-services:
- dhfs1:
- build: .
- privileged: true
- devices:
- - /dev/fuse
- volumes:
- - $HOME/dhfs/dhfs1:/dhfs_root
- - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- - ./target/quarkus-app:/app
- command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
- -Ddhfs.objects.persistence.files.root=/dhfs_root/p
- -Ddhfs.objects.root=/dhfs_root/d
- -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
- -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
- -jar /app/quarkus-run.jar"
- ports:
- - 8080:8080
- - 8081:8443
- - 5005:5005
- dhfs2:
- build: .
- privileged: true
- devices:
- - /dev/fuse
- volumes:
- - $HOME/dhfs/dhfs2:/dhfs_root
- - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- - ./target/quarkus-app:/app
- command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
- --add-exports java.base/jdk.internal.access=ALL-UNNAMED
- -Ddhfs.objects.persistence.files.root=/dhfs_root/p
- -Ddhfs.objects.root=/dhfs_root/d
- -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
- -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
- -jar /app/quarkus-run.jar"
- ports:
- - 8090:8080
- - 8091:8443
- - 5010:5010
diff --git a/dhfs-parent/server-old/pom.xml b/dhfs-parent/server-old/pom.xml
deleted file mode 100644
index bb74c72a..00000000
--- a/dhfs-parent/server-old/pom.xml
+++ /dev/null
@@ -1,209 +0,0 @@
-<?xml version="1.0"?>
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"
-         xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.usatiuk.dhfs</groupId>
-    <artifactId>server</artifactId>
-    <version>1.0.0-SNAPSHOT</version>
-
-    <parent>
-        <groupId>com.usatiuk.dhfs</groupId>
-        <artifactId>parent</artifactId>
-        <version>1.0-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testcontainers</groupId>
-            <artifactId>testcontainers</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.awaitility</groupId>
-            <artifactId>awaitility</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk</groupId>
-            <artifactId>autoprotomap</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk</groupId>
-            <artifactId>autoprotomap-deployment</artifactId>
-            <version>1.0-SNAPSHOT</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcprov-jdk18on</artifactId>
-            <version>1.78.1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.bouncycastle</groupId>
-            <artifactId>bcpkix-jdk18on</artifactId>
-            <version>1.78.1</version>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-security</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>net.openhft</groupId>
-            <artifactId>zero-allocation-hashing</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-grpc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-arc</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-client-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-rest-jsonb</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-scheduler</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.quarkus</groupId>
-            <artifactId>quarkus-junit5</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.projectlombok</groupId>
-            <artifactId>lombok</artifactId>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.github.SerCeMan</groupId>
-            <artifactId>jnr-fuse</artifactId>
-            <version>44ed40f8ce</version>
-        </dependency>
-        <dependency>
-            <groupId>com.github.jnr</groupId>
-            <artifactId>jnr-ffi</artifactId>
-            <version>2.2.16</version>
-        </dependency>
-        <dependency>
-            <groupId>com.github.jnr</groupId>
-            <artifactId>jnr-posix</artifactId>
-            <version>3.1.19</version>
-        </dependency>
-        <dependency>
-            <groupId>com.github.jnr</groupId>
-            <artifactId>jnr-constants</artifactId>
-            <version>0.10.4</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.jboss.slf4j</groupId>
-            <artifactId>slf4j-jboss-logmanager</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-collections4</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-math3</artifactId>
-            <version>3.6.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk</groupId>
-            <artifactId>kleppmanntree</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>supportlib</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>objects</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-        <dependency>
-            <groupId>com.usatiuk.dhfs</groupId>
-            <artifactId>utils</artifactId>
-            <version>1.0-SNAPSHOT</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <configuration>
-                    <forkCount>1C</forkCount>
-                    <reuseForks>false</reuseForks>
-                    <parallel>classes</parallel>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-failsafe-plugin</artifactId>
-                <configuration>
-                    <systemPropertyVariables>
-                        <junit.jupiter.execution.parallel.enabled>
-                            true
-                        </junit.jupiter.execution.parallel.enabled>
-                        <junit.jupiter.execution.parallel.mode.default>
-                            concurrent
-                        </junit.jupiter.execution.parallel.mode.default>
-                        <junit.jupiter.execution.parallel.config.dynamic.factor>
-                            0.5
-                        </junit.jupiter.execution.parallel.config.dynamic.factor>
-                        <!-- two further boolean settings (both "true"); element names lost in extraction -->
-                    </systemPropertyVariables>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>${quarkus.platform.group-id}</groupId>
-                <artifactId>quarkus-maven-plugin</artifactId>
-                <version>${quarkus.platform.version}</version>
-                <extensions>true</extensions>
-                <executions>
-                    <execution>
-                        <id>quarkus-plugin</id>
-                        <goals>
-                            <goal>build</goal>
-                            <goal>generate-code</goal>
-                            <goal>generate-code-tests</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/dhfs-parent/server-old/src/lombok.config b/dhfs-parent/server-old/src/lombok.config
deleted file mode 100644
index f1c474ce..00000000
--- a/dhfs-parent/server-old/src/lombok.config
+++ /dev/null
@@ -1 +0,0 @@
-lombok.accessors.prefix += _
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm b/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm
deleted file mode 100644
index b1de5988..00000000
--- a/dhfs-parent/server-old/src/main/docker/Dockerfile.jvm
+++ /dev/null
@@ -1,97 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-jvm
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-# in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-# used to calculate a default maximal heap memory based on a containers restriction.
-# If used in a container without any memory constraints for the container then this
-# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-# of the container available memory as set here. The default is `50` which means 50%
-# of the available memory is used as an upper boundary. You can skip this mechanism by
-# setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-# is used to calculate a default initial heap memory based on the maximum heap memory.
-# If used in a container without any memory constraints for the container then this
-# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-# is used as the initial heap size. You can skip this mechanism by setting this value
-# to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-# This is used to calculate the maximum value of the initial heap memory. If used in
-# a container without any memory constraints for the container then this option has
-# no effect. If there is a memory constraint then `-Xms` is limited to the value set
-# here. The default is 4096MB which means the calculated value of `-Xms` never will
-# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-# when things are happening. This option, if set to true, will set
-# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-# true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-# (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-# (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-# (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-# previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-# contain the necessary JRE command-line options to specify the required GC, which
-# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-# accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-# We make four distinct layers so if there are application changes the library layers can be re-used
-COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
-COPY --chown=185 target/quarkus-app/*.jar /deployments/
-COPY --chown=185 target/quarkus-app/app/ /deployments/app/
-COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
-
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar b/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar
deleted file mode 100644
index f66a1665..00000000
--- a/dhfs-parent/server-old/src/main/docker/Dockerfile.legacy-jar
+++ /dev/null
@@ -1,93 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# If you want to include the debug port into your docker image
-# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005.
-# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
-# when running the container
-#
-# Then run the container using :
-#
-# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
-#
-# This image uses the `run-java.sh` script to run the application.
-# This scripts computes the command line to execute your Java application, and
-# includes memory/GC tuning.
-# You can configure the behavior using the following environment properties:
-# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
-# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
-# in JAVA_OPTS (example: "-Dsome.property=foo")
-# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
-# used to calculate a default maximal heap memory based on a containers restriction.
-# If used in a container without any memory constraints for the container then this
-# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
-# of the container available memory as set here. The default is `50` which means 50%
-# of the available memory is used as an upper boundary. You can skip this mechanism by
-# setting this value to `0` in which case no `-Xmx` option is added.
-# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
-# is used to calculate a default initial heap memory based on the maximum heap memory.
-# If used in a container without any memory constraints for the container then this
-# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
-# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
-# is used as the initial heap size. You can skip this mechanism by setting this value
-# to `0` in which case no `-Xms` option is added (example: "25")
-# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
-# This is used to calculate the maximum value of the initial heap memory. If used in
-# a container without any memory constraints for the container then this option has
-# no effect. If there is a memory constraint then `-Xms` is limited to the value set
-# here. The default is 4096MB which means the calculated value of `-Xms` never will
-# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
-# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
-# when things are happening. This option, if set to true, will set
-# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
-# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example:
-# true").
-# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
-# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
-# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
-# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
-# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
-# (example: "20")
-# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
-# (example: "40")
-# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
-# (example: "4")
-# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
-# previous GC times. (example: "90")
-# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
-# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
-# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
-# contain the necessary JRE command-line options to specify the required GC, which
-# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
-# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
-# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
-# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
-# accessed directly. (example: "foo.example.com,bar.example.com")
-#
-###
-FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
-
-ENV LANGUAGE='en_US:en'
-
-
-COPY target/lib/* /deployments/lib/
-COPY target/*-runner.jar /deployments/quarkus-run.jar
-
-EXPOSE 8080
-USER 185
-ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
-ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
-
-ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native b/dhfs-parent/server-old/src/main/docker/Dockerfile.native
deleted file mode 100644
index 226e7c71..00000000
--- a/dhfs-parent/server-old/src/main/docker/Dockerfile.native
+++ /dev/null
@@ -1,27 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
-WORKDIR /work/
-RUN chown 1001 /work \
- && chmod "g+rwX" /work \
- && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
diff --git a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro b/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro
deleted file mode 100644
index 4bd4c6de..00000000
--- a/dhfs-parent/server-old/src/main/docker/Dockerfile.native-micro
+++ /dev/null
@@ -1,30 +0,0 @@
-####
-# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
-# It uses a micro base image, tuned for Quarkus native executables.
-# It reduces the size of the resulting container image.
-# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
-#
-# Before building the container image run:
-#
-# ./mvnw package -Dnative
-#
-# Then, build the image with:
-#
-# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
-#
-# Then run the container using:
-#
-# docker run -i --rm -p 8080:8080 quarkus/server
-#
-###
-FROM quay.io/quarkus/quarkus-micro-image:2.0
-WORKDIR /work/
-RUN chown 1001 /work \
- && chmod "g+rwX" /work \
- && chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
-
-EXPOSE 8080
-USER 1001
-
-ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
diff --git a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java b/dhfs-parent/server-old/src/main/java/DeadlockDetector.java
deleted file mode 100644
index 7b275098..00000000
--- a/dhfs-parent/server-old/src/main/java/DeadlockDetector.java
+++ /dev/null
@@ -1,63 +0,0 @@
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.util.Arrays;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-@ApplicationScoped
-public class DeadlockDetector {
- private final ExecutorService _executor = Executors.newSingleThreadExecutor();
-
- void init(@Observes @Priority(1) StartupEvent event) {
- _executor.submit(this::run);
- }
-
- void shutdown(@Observes @Priority(100000) ShutdownEvent event) {
- _executor.shutdownNow();
- }
-
- private void run() {
- ThreadMXBean bean = ManagementFactory.getThreadMXBean();
- try {
- while (!Thread.interrupted()) {
- Thread.sleep(4000);
-
- long[] threadIds = bean.findDeadlockedThreads(); // Returns null if no threads are deadlocked.
-
- if (threadIds != null) {
- ThreadInfo[] infos = bean.getThreadInfo(threadIds, Integer.MAX_VALUE);
-
- StringBuilder sb = new StringBuilder();
-
- sb.append("Deadlock detected!\n");
-
- for (ThreadInfo info : infos) {
- StackTraceElement[] stack = info.getStackTrace();
- sb.append(info.getThreadName()).append("\n");
- sb.append("getLockedMonitors: ").append(Arrays.toString(info.getLockedMonitors())).append("\n");
- sb.append("getLockedSynchronizers: ").append(Arrays.toString(info.getLockedSynchronizers())).append("\n");
- sb.append("waiting on: ").append(info.getLockInfo()).append("\n");
- sb.append("locked by: ").append(info.getLockOwnerName()).append("\n");
- sb.append("Stack trace:\n");
- for (var e : stack) {
- sb.append(e.toString()).append("\n");
- }
- sb.append("===");
- }
-
- Log.error(sb);
- }
- }
- } catch (InterruptedException e) {
- }
- Log.info("Deadlock detector thread exiting");
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java
deleted file mode 100644
index 69e488c0..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/Main.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.usatiuk.dhfs;
-
-import io.quarkus.runtime.Quarkus;
-import io.quarkus.runtime.QuarkusApplication;
-import io.quarkus.runtime.annotations.QuarkusMain;
-
-@QuarkusMain
-public class Main {
- public static void main(String... args) {
- Quarkus.run(DhfsStorageServerApp.class, args);
- }
-
- public static class DhfsStorageServerApp implements QuarkusApplication {
-
- @Override
- public int run(String... args) throws Exception {
- Quarkus.waitForExit();
- return 0;
- }
- }
-}
\ No newline at end of file
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java
deleted file mode 100644
index dcd379a8..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/ShutdownChecker.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package com.usatiuk.dhfs;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.io.IOException;
-import java.nio.file.Paths;
-
-@ApplicationScoped
-public class ShutdownChecker {
- private static final String dataFileName = "running";
- @ConfigProperty(name = "dhfs.objects.root")
- String dataRoot;
- boolean _cleanShutdown = true;
- boolean _initialized = false;
-
- void init(@Observes @Priority(2) StartupEvent event) throws IOException {
- Paths.get(dataRoot).toFile().mkdirs();
- Log.info("Initializing with root " + dataRoot);
- if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
- _cleanShutdown = false;
- Log.error("Unclean shutdown detected!");
- } else {
- Paths.get(dataRoot).resolve(dataFileName).toFile().createNewFile();
- }
- _initialized = true;
- }
-
- void shutdown(@Observes @Priority(100000) ShutdownEvent event) throws IOException {
- Paths.get(dataRoot).resolve(dataFileName).toFile().delete();
- }
-
- public boolean lastShutdownClean() {
- if (!_initialized) throw new IllegalStateException("ShutdownChecker not initialized");
- return _cleanShutdown;
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java
deleted file mode 100644
index 0f242e45..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/DirectoryConflictResolver.java
+++ /dev/null
@@ -1,107 +0,0 @@
-package com.usatiuk.dhfs.files.conflicts;
-
-import com.usatiuk.dhfs.files.objects.Directory;
-import com.usatiuk.dhfs.files.objects.FsNode;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.dhfs.objects.repository.ObjectHeader;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.NotImplementedException;
-
-import java.util.*;
-
-@ApplicationScoped
-public class DirectoryConflictResolver implements ConflictResolver {
- @Inject
- PersistentPeerDataService persistentPeerDataService;
-
- @Inject
- JObjectManager jObjectManager;
-
- @Override
- public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
- var theirsDir = (Directory) theirsData;
- if (!theirsDir.getClass().equals(Directory.class)) {
- Log.error("Object type mismatch!");
- throw new NotImplementedException();
- }
-
- ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursDirU, bump, invalidate) -> {
- if (oursDirU == null)
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
- if (!(oursDirU instanceof Directory oursDir))
- throw new NotImplementedException("Type conflict for " + ours.getMeta().getName() + ", directory was expected");
-
- Directory first;
- Directory second;
- UUID otherHostname;
-
- if (oursDir.getMtime() >= theirsDir.getMtime()) {
- first = oursDir;
- second = theirsDir;
- otherHostname = conflictHost;
- } else {
- second = oursDir;
- first = theirsDir;
- otherHostname = persistentPeerDataService.getSelfUuid();
- }
-
- LinkedHashMap<String, UUID> mergedChildren = new LinkedHashMap<>(first.getChildren());
- Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());
-
- for (var entry : second.getChildren().entrySet()) {
- if (mergedChildren.containsKey(entry.getKey()) &&
- !Objects.equals(mergedChildren.get(entry.getKey()), entry.getValue())) {
- int i = 0;
- do {
- String name = entry.getKey() + ".conflict." + i + "." + otherHostname;
- if (mergedChildren.containsKey(name)) {
- i++;
- continue;
- }
- mergedChildren.put(name, entry.getValue());
- break;
- } while (true);
- } else {
- mergedChildren.put(entry.getKey(), entry.getValue());
- }
- }
-
- for (var entry : theirsHeader.getChangelog().getEntriesList()) {
- newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
- }
-
- boolean wasChanged = oursDir.getChildren().size() != mergedChildren.size()
- || oursDir.getMtime() != first.getMtime()
- || oursDir.getCtime() != first.getCtime();
-
- if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));
-
- if (wasChanged) {
- newChangelog.merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum);
-
- for (var child : mergedChildren.values()) {
- if (!(new HashSet<>(oursDir.getChildren().values()).contains(child))) {
- jObjectManager.getOrPut(child.toString(), FsNode.class, Optional.of(oursDir.getName()));
- }
- }
-
- oursDir.setMtime(first.getMtime());
- oursDir.setCtime(first.getCtime());
-
- oursDir.setChildren(mergedChildren);
- }
-
- m.setChangelog(newChangelog);
- return null;
- });
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java
deleted file mode 100644
index 4610c4b5..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/FileConflictResolver.java
+++ /dev/null
@@ -1,182 +0,0 @@
-package com.usatiuk.dhfs.files.conflicts;
-
-import com.usatiuk.dhfs.files.objects.ChunkData;
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.files.service.DhfsFileService;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.dhfs.objects.repository.ObjectHeader;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.kleppmanntree.AlreadyExistsException;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.NotImplementedException;
-import org.apache.commons.lang3.tuple.Pair;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.util.*;
-
-@ApplicationScoped
-public class FileConflictResolver implements ConflictResolver {
- @Inject
- PersistentPeerDataService persistentPeerDataService;
- @Inject
- DhfsFileService fileService;
- @Inject
- JKleppmannTreeManager jKleppmannTreeManager;
- @Inject
- JObjectManager jObjectManager;
-
- @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
- boolean useHashForChunks;
-
- // FIXME: There might be a race where node with conflict deletes a file, and we answer that
- // it can do it as we haven't recorded the received file in the object model yet
- @Override
- public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
- var theirsFile = (File) theirsData;
- if (!theirsFile.getClass().equals(File.class)) {
- Log.error("Object type mismatch!");
- throw new NotImplementedException();
- }
-
- var newJFile = ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursFileU, bumpFile, invalidateFile) -> {
- if (oursFileU == null)
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
- if (!(oursFileU instanceof File oursFile))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Bad type for file"));
-
- File first;
- File second;
- UUID otherHostname;
-
- if (oursFile.getMtime() >= theirsFile.getMtime()) {
- first = oursFile;
- second = theirsFile;
- otherHostname = conflictHost;
- } else {
- second = oursFile;
- first = theirsFile;
- otherHostname = persistentPeerDataService.getSelfUuid();
- }
-
- Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());
-
- for (var entry : theirsHeader.getChangelog().getEntriesList()) {
- newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
- }
-
- boolean chunksDiff = !Objects.equals(first.getChunks(), second.getChunks());
-
- var firstChunksCopy = first.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
- var secondChunksCopy = second.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
-
- boolean wasChanged = oursFile.getMtime() != first.getMtime()
- || oursFile.getCtime() != first.getCtime()
- || first.isSymlink() != second.isSymlink()
- || chunksDiff;
-
- if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));
-
- m.setChangelog(newChangelog);
-
- if (wasChanged) {
- m.getChangelog().merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum);
-
- if (useHashForChunks)
- throw new NotImplementedException();
-
- HashSet<String> oursBackup = new HashSet<>(oursFile.getChunks().values());
- oursFile.getChunks().clear();
-
- for (var e : firstChunksCopy) {
- oursFile.getChunks().put(e.getLeft(), e.getValue());
- jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(oursFile.getName()));
- }
- HashSet<String> oursNew = new HashSet<>(oursFile.getChunks().values());
-
- oursFile.setMtime(first.getMtime());
- oursFile.setCtime(first.getCtime());
-
- var newFile = new File(UUID.randomUUID(), second.getMode(), second.isSymlink());
-
- newFile.setMtime(second.getMtime());
- newFile.setCtime(second.getCtime());
-
- for (var e : secondChunksCopy) {
- newFile.getChunks().put(e.getLeft(), e.getValue());
- jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(newFile.getName()));
- }
-
- fileService.updateFileSize((JObject<File>) ours);
-
- var ret = jObjectManager.putLocked(newFile, Optional.empty());
-
- fileService.updateFileSize((JObject<File>) ret);
-
- try {
- for (var cuuid : oursBackup) {
- if (!oursNew.contains(cuuid))
- jObjectManager
- .get(cuuid)
- .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, d, b, v) -> {
- mc.removeRef(oursFile.getName());
- return null;
- }));
- }
- } catch (Exception e) {
- ret.getMeta().unfreeze();
- ret.rwUnlock();
- return null;
- }
- return ret;
- }
-
- return null;
- });
-
- if (newJFile == null) return;
- boolean locked = true;
-
- // FIXME: Slow and what happens if a directory is deleted?
- try {
- var parent = fileService.inoToParent(ours.getMeta().getName());
- // FIXME?
- var tree = jKleppmannTreeManager.getTree("fs");
-
- var nodeId = tree.getNewNodeId();
- newJFile.getMeta().addRef(nodeId);
- newJFile.getMeta().unfreeze();
- newJFile.rwUnlock();
- locked = false;
-
- int i = 0;
-
- do {
- try {
- tree.move(parent.getRight(), new JKleppmannTreeNodeMetaFile(parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + conflictHost + "." + i, newJFile.getMeta().getName()), nodeId);
- } catch (AlreadyExistsException aex) {
- i++;
- continue;
- }
- break;
- } while (true);
- } catch (Exception e) {
- Log.error("Error when creating new file for " + ours.getMeta().getName(), e);
- } finally {
- if (locked) {
- newJFile.getMeta().unfreeze();
- newJFile.getMeta().getReferrersMutable().clear();
- newJFile.rwUnlock();
- }
- }
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java
deleted file mode 100644
index 22a429b7..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/conflicts/NoOpConflictResolver.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package com.usatiuk.dhfs.files.conflicts;
-
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.dhfs.objects.repository.ObjectHeader;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import jakarta.enterprise.context.ApplicationScoped;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Objects;
-import java.util.UUID;
-
-@ApplicationScoped
-public class NoOpConflictResolver implements ConflictResolver {
- @Override
- public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
- ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, i) -> {
- if (d == null)
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
-
- if (!Objects.equals(theirsData.getClass(), ours.getData().getClass()))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Type conflict for object " + m.getName()
- + " ours: " + ours.getData().getClass() + " theirs: " + theirsData.getClass()));
-
- if (!Objects.equals(theirsData, ours.getData()))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict for immutable object " + m.getName()));
-
- Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());
-
- for (var entry : theirsHeader.getChangelog().getEntriesList())
- newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
-
- if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
- throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));
-
- m.setChangelog(newChangelog);
-
- return null;
- });
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java
deleted file mode 100644
index 46f8e283..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkData.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.google.protobuf.ByteString;
-import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver;
-import com.usatiuk.dhfs.objects.jrepository.AssumedUnique;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.Leaf;
-import com.usatiuk.dhfs.objects.persistence.ChunkDataP;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import net.openhft.hashing.LongTupleHashFunction;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
-
-@AssumedUnique
-@Leaf
-public class ChunkData extends JObjectData {
- final ChunkDataP _data;
-
- public ChunkData(ByteString bytes) {
- super();
- _data = ChunkDataP.newBuilder()
- .setData(bytes)
- // TODO: There might be (most definitely) a copy there
- .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer()))
- .mapToObj(Long::toHexString).collect(Collectors.joining()))
- .build();
- }
-
- public ChunkData(ByteString bytes, String name) {
- super();
- _data = ChunkDataP.newBuilder()
- .setData(bytes)
- .setName(name)
- .build();
- }
-
- public ChunkData(ChunkDataP chunkDataP) {
- super();
- _data = chunkDataP;
- }
-
- ChunkDataP getData() {
- return _data;
- }
-
- public ByteString getBytes() {
- return _data.getData();
- }
-
- public int getSize() {
- return _data.getData().size();
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ChunkData chunkData = (ChunkData) o;
- return Objects.equals(getName(), chunkData.getName());
- }
-
- @Override
- public int hashCode() {
- return Objects.hashCode(getName());
- }
-
- @Override
- public String getName() {
- return _data.getName();
- }
-
- @Override
- public Class<? extends ConflictResolver> getConflictResolver() {
- return NoOpConflictResolver.class;
- }
-
- @Override
- public Collection<String> extractRefs() {
- return List.of();
- }
-
- @Override
- public int estimateSize() {
- return _data.getData().size();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java
deleted file mode 100644
index 64532d56..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/ChunkDataSerializer.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.ChunkDataP;
-import jakarta.inject.Singleton;
-
-@Singleton
- public class ChunkDataSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
- @Override
- public ChunkData deserialize(ChunkDataP message) {
- return new ChunkData(message);
- }
-
- @Override
- public ChunkDataP serialize(ChunkData object) {
- return object.getData();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java
deleted file mode 100644
index e4aa578f..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/Directory.java
+++ /dev/null
@@ -1,65 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.dhfs.files.conflicts.DirectoryConflictResolver;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import lombok.Getter;
-import lombok.Setter;
-
-import java.io.Serial;
-import java.util.*;
-
-public class Directory extends FsNode {
- @Serial
- private static final long serialVersionUID = 1;
- @Getter
- @Setter
- private Map<String, UUID> _children = new HashMap<>();
-
- public Directory(UUID uuid) {
- super(uuid);
- }
-
- public Directory(UUID uuid, long mode) {
- super(uuid, mode);
- }
-
- @Override
- public Class<? extends ConflictResolver> getConflictResolver() {
- return DirectoryConflictResolver.class;
- }
-
- public Optional<UUID> getKid(String name) {
- return Optional.ofNullable(_children.get(name));
- }
-
- public boolean removeKid(String name) {
- return _children.remove(name) != null;
- }
-
- public boolean putKid(String name, UUID uuid) {
- if (_children.containsKey(name)) return false;
-
- _children.put(name, uuid);
- return true;
- }
-
- @Override
- public Class<? extends JObjectData> getRefType() {
- return FsNode.class;
- }
-
- @Override
- public Collection<String> extractRefs() {
- return _children.values().stream().map(UUID::toString).toList();
- }
-
- public List<String> getChildrenList() {
- return _children.keySet().stream().toList();
- }
-
- @Override
- public int estimateSize() {
- return _children.size() * 192;
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java
deleted file mode 100644
index bec56829..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/DirectorySerializer.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.DirectoryP;
-import com.usatiuk.dhfs.objects.persistence.FsNodeP;
-import jakarta.inject.Singleton;
-
-import java.util.Map;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-@Singleton
- public class DirectorySerializer implements ProtoSerializer<DirectoryP, Directory> {
- @Override
- public Directory deserialize(DirectoryP message) {
- var ret = new Directory(UUID.fromString(message.getFsNode().getUuid()));
- ret.setMtime(message.getFsNode().getMtime());
- ret.setCtime(message.getFsNode().getCtime());
- ret.setMode(message.getFsNode().getMode());
- ret.getChildren().putAll(message.getChildrenMap().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> UUID.fromString(e.getValue()))));
- return ret;
- }
-
- @Override
- public DirectoryP serialize(Directory object) {
- return DirectoryP.newBuilder()
- .setFsNode(FsNodeP.newBuilder()
- .setCtime(object.getCtime())
- .setMtime(object.getMtime())
- .setMode(object.getMode())
- .setUuid(object.getUuid().toString())
- .build())
- .putAllChildren(object.getChildren().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString())))
- .build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java
deleted file mode 100644
index 0c6fa4e8..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/File.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.dhfs.files.conflicts.FileConflictResolver;
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import lombok.Getter;
-import lombok.Setter;
-
-import java.util.*;
-
-public class File extends FsNode {
- @Getter
- private final NavigableMap<Long, String> _chunks;
- @Getter
- private final boolean _symlink;
- @Getter
- @Setter
- private long _size = 0;
-
- public File(UUID uuid, long mode, boolean symlink) {
- super(uuid, mode);
- _symlink = symlink;
- _chunks = new TreeMap<>();
- }
-
- public File(UUID uuid, long mode, boolean symlink, NavigableMap<Long, String> chunks) {
- super(uuid, mode);
- _symlink = symlink;
- _chunks = chunks;
- }
-
- @Override
- public Class<? extends ConflictResolver> getConflictResolver() {
- return FileConflictResolver.class;
- }
-
- @Override
- public Class<? extends JObjectData> getRefType() {
- return ChunkData.class;
- }
-
- @Override
- public Collection<String> extractRefs() {
- return Collections.unmodifiableCollection(_chunks.values());
- }
-
- @Override
- public int estimateSize() {
- return _chunks.size() * 192;
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java
deleted file mode 100644
index 510cefd3..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FileSerializer.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.FileP;
-import com.usatiuk.dhfs.objects.persistence.FsNodeP;
-import jakarta.inject.Singleton;
-
-import java.util.TreeMap;
-import java.util.UUID;
-
-@Singleton
- public class FileSerializer implements ProtoSerializer<FileP, File> {
- @Override
- public File deserialize(FileP message) {
- TreeMap<Long, String> chunks = new TreeMap<>();
- message.getChunksList().forEach(chunk -> {
- chunks.put(chunk.getStart(), chunk.getId());
- });
- var ret = new File(UUID.fromString(message.getFsNode().getUuid()),
- message.getFsNode().getMode(),
- message.getSymlink(),
- chunks
- );
- ret.setMtime(message.getFsNode().getMtime());
- ret.setCtime(message.getFsNode().getCtime());
- ret.setSize(message.getSize());
- return ret;
- }
-
- @Override
- public FileP serialize(File object) {
- var builder = FileP.newBuilder()
- .setFsNode(FsNodeP.newBuilder()
- .setCtime(object.getCtime())
- .setMtime(object.getMtime())
- .setMode(object.getMode())
- .setUuid(object.getUuid().toString())
- .build())
- .setSymlink(object.isSymlink())
- .setSize(object.getSize());
- object.getChunks().forEach((s, i) -> {
- builder.addChunksBuilder().setStart(s).setId(i);
- });
- return builder.build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java
deleted file mode 100644
index a6e6ac14..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/objects/FsNode.java
+++ /dev/null
@@ -1,43 +0,0 @@
-package com.usatiuk.dhfs.files.objects;
-
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import lombok.Getter;
-import lombok.Setter;
-
-import java.io.Serial;
-import java.util.UUID;
-
-public abstract class FsNode extends JObjectData {
- @Serial
- private static final long serialVersionUID = 1;
-
- @Getter
- final UUID _uuid;
- @Getter
- @Setter
- private long _mode;
- @Getter
- @Setter
- private long _ctime;
- @Getter
- @Setter
- private long _mtime;
-
- protected FsNode(UUID uuid) {
- this._uuid = uuid;
- this._ctime = System.currentTimeMillis();
- this._mtime = this._ctime;
- }
-
- protected FsNode(UUID uuid, long mode) {
- this._uuid = uuid;
- this._mode = mode;
- this._ctime = System.currentTimeMillis();
- this._mtime = this._ctime;
- }
-
- @Override
- public String getName() {
- return _uuid.toString();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java
deleted file mode 100644
index 58678dd2..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileService.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.UnsafeByteOperations;
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.util.Optional;
-
-public interface DhfsFileService {
- Optional<String> open(String name);
-
- Optional<String> create(String name, long mode);
-
- Pair<String, String> inoToParent(String ino);
-
- void mkdir(String name, long mode);
-
- Optional<GetattrRes> getattr(String name);
-
- Boolean chmod(String name, long mode);
-
- void unlink(String name);
-
- Boolean rename(String from, String to);
-
- Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs);
-
- Iterable<String> readDir(String name);
-
- void updateFileSize(JObject<File> file);
-
- Long size(String f);
-
- Optional<ByteString> read(String fileUuid, long offset, int length);
-
- Long write(String fileUuid, long offset, ByteString data);
-
- default Long write(String fileUuid, long offset, byte[] data) {
- return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
- }
-
- Boolean truncate(String fileUuid, long length);
-
- String readlink(String uuid);
-
- ByteString readlinkBS(String uuid);
-
- String symlink(String oldpath, String newpath);
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
deleted file mode 100644
index 33b30d85..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DhfsFileServiceImpl.java
+++ /dev/null
@@ -1,814 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-import com.google.protobuf.ByteString;
-import com.google.protobuf.UnsafeByteOperations;
-import com.usatiuk.dhfs.files.objects.ChunkData;
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.files.objects.FsNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.jrepository.JMutator;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.tuple.Pair;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Path;
-import java.util.*;
-import java.util.stream.StreamSupport;
-
-@ApplicationScoped
-public class DhfsFileServiceImpl implements DhfsFileService {
- @Inject
- JObjectManager jObjectManager;
- @Inject
- JObjectTxManager jObjectTxManager;
-
- @ConfigProperty(name = "dhfs.files.target_chunk_size")
- int targetChunkSize;
-
- @ConfigProperty(name = "dhfs.files.write_merge_threshold")
- float writeMergeThreshold;
-
- @ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
- float writeMergeMaxChunkToTake;
-
- @ConfigProperty(name = "dhfs.files.write_merge_limit")
- float writeMergeLimit;
-
- @ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
- float writeLastChunkLimit;
-
- @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
- boolean useHashForChunks;
-
- @ConfigProperty(name = "dhfs.files.allow_recursive_delete")
- boolean allowRecursiveDelete;
-
- @ConfigProperty(name = "dhfs.objects.ref_verification")
- boolean refVerification;
-
- @ConfigProperty(name = "dhfs.objects.write_log")
- boolean writeLogging;
-
- @Inject
- PersistentPeerDataService persistentPeerDataService;
- @Inject
- JKleppmannTreeManager jKleppmannTreeManager;
-
- private JKleppmannTreeManager.JKleppmannTree _tree;
-
- private ChunkData createChunk(ByteString bytes) {
- if (useHashForChunks) {
- return new ChunkData(bytes);
- } else {
- return new ChunkData(bytes, persistentPeerDataService.getUniqueId());
- }
- }
-
- void init(@Observes @Priority(500) StartupEvent event) {
- Log.info("Initializing file service");
- _tree = jKleppmannTreeManager.getTree("fs");
- }
-
- private JObject<JKleppmannTreeNode> getDirEntry(String name) {
- var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
- if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
- var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
- if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class))
- throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name));
- return (JObject<JKleppmannTreeNode>) ret;
- }
-
- private Optional<JObject<JKleppmannTreeNode>> getDirEntryOpt(String name) {
- var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
- if (res == null) return Optional.empty();
- var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
- if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class))
- throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not jObject: " + name));
- return Optional.of((JObject<JKleppmannTreeNode>) ret);
- }
-
- @Override
- public Optional<GetattrRes> getattr(String uuid) {
- return jObjectTxManager.executeTx(() -> {
- var ref = jObjectManager.get(uuid);
- if (ref.isEmpty()) return Optional.empty();
- return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
- GetattrRes ret;
- if (d instanceof File f) {
- ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? GetattrType.SYMLINK : GetattrType.FILE);
- } else if (d instanceof JKleppmannTreeNode) {
- ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
- } else {
- throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName()));
- }
- return Optional.of(ret);
- });
- });
- }
-
- @Override
- public Optional<String> open(String name) {
- return jObjectTxManager.executeTx(() -> {
- try {
- var ret = getDirEntry(name);
- return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno();
- else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName();
- throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName()));
- }));
- } catch (StatusRuntimeException e) {
- if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
- return Optional.empty();
- }
- throw e;
- }
- });
- }
-
- private void ensureDir(JObject<JKleppmannTreeNode> entry) {
- entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
- if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f)
- throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not directory"));
- else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null;
- throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + m.getName()));
- });
- }
-
- @Override
- public Optional<String> create(String name, long mode) {
- return jObjectTxManager.executeTx(() -> {
- Path path = Path.of(name);
- var parent = getDirEntry(path.getParent().toString());
-
- ensureDir(parent);
-
- String fname = path.getFileName().toString();
-
- var fuuid = UUID.randomUUID();
- Log.debug("Creating file " + fuuid);
- File f = new File(fuuid, mode, false);
-
- var newNodeId = _tree.getNewNodeId();
- var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId));
- try {
- _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
- } catch (Exception e) {
- fobj.getMeta().removeRef(newNodeId);
- throw e;
- } finally {
- fobj.rwUnlock();
- }
- return Optional.of(f.getName());
- });
- }
-
- //FIXME: Slow..
- @Override
- public Pair<String, String> inoToParent(String ino) {
- return jObjectTxManager.executeTx(() -> {
- return _tree.findParent(w -> {
- if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f)
- if (f.getFileIno().equals(ino))
- return true;
- return false;
- });
- });
- }
-
- @Override
- public void mkdir(String name, long mode) {
- jObjectTxManager.executeTx(() -> {
- Path path = Path.of(name);
- var parent = getDirEntry(path.getParent().toString());
- ensureDir(parent);
-
- String dname = path.getFileName().toString();
-
- Log.debug("Creating directory " + name);
-
- _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId());
- });
- }
-
- @Override
- public void unlink(String name) {
- jObjectTxManager.executeTx(() -> {
- var node = getDirEntryOpt(name).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name)));
- JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
- if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f)
- if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException();
- return d.getNode().getMeta();
- });
-
- _tree.trash(meta, node.getMeta().getName());
- });
- }
-
- @Override
- public Boolean rename(String from, String to) {
- return jObjectTxManager.executeTx(() -> {
- var node = getDirEntry(from);
- JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta());
-
- var toPath = Path.of(to);
- var toDentry = getDirEntry(toPath.getParent().toString());
- ensureDir(toDentry);
-
- _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName());
-
- return true;
- });
- }
-
- @Override
- public Boolean chmod(String uuid, long mode) {
- return jObjectTxManager.executeTx(() -> {
- var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
-
- dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> {
- if (d instanceof JKleppmannTreeNode) {
- return null;//FIXME:?
- } else if (d instanceof File f) {
- bump.apply();
- f.setMtime(System.currentTimeMillis());
- f.setMode(mode);
- } else {
- throw new IllegalArgumentException(uuid + " is not a file");
- }
- return null;
- });
-
- return true;
- });
- }
-
- @Override
- public Iterable<String> readDir(String name) {
- return jObjectTxManager.executeTx(() -> {
- var found = getDirEntry(name);
-
- return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
- if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) {
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
- }
- return new ArrayList<>(d.getNode().getChildren().keySet());
- });
- });
- }
-
- @Override
- public Optional<ByteString> read(String fileUuid, long offset, int length) {
- return jObjectTxManager.executeTx(() -> {
- if (length < 0)
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length must not be negative: " + length));
- if (offset < 0)
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset must not be negative: " + offset));
-
- var fileOpt = jObjectManager.get(fileUuid);
- if (fileOpt.isEmpty()) {
- Log.error("File not found when trying to read: " + fileUuid);
- return Optional.empty();
- }
- var file = fileOpt.get();
-
- try {
- return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
- if (!(fileData instanceof File)) {
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
- }
- var chunksAll = ((File) fileData).getChunks();
- if (chunksAll.isEmpty()) {
- return Optional.of(ByteString.empty());
- }
- var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet();
-
- if (chunksList.isEmpty()) {
- return Optional.of(ByteString.empty());
- }
-
- var chunks = chunksList.iterator();
- ByteString buf = ByteString.empty();
-
- long curPos = offset;
- var chunk = chunks.next();
-
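- // Walk the chunks overlapping [offset, offset + length), appending the
- // covered slice of each chunk to the result buffer.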
- while (curPos < offset + length) {
- var chunkPos = chunk.getKey();
-
- long offInChunk = curPos - chunkPos;
-
- long toReadInChunk = (offset + length) - curPos;
-
- var chunkBytes = readChunk(chunk.getValue());
-
- long readableLen = chunkBytes.size() - offInChunk;
-
- var toReadReally = Math.min(readableLen, toReadInChunk);
-
- if (toReadReally < 0) break;
-
- buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
-
- curPos += toReadReally;
-
- if (readableLen > toReadInChunk)
- break;
-
- if (!chunks.hasNext()) break;
-
- chunk = chunks.next();
- }
-
- // FIXME:
- return Optional.of(buf);
- });
- } catch (Exception e) {
- Log.error("Error reading file: " + fileUuid, e);
- return Optional.empty();
- }
- });
- }
-
- private ByteString readChunk(String uuid) {
- var chunkRead = jObjectManager.get(uuid).orElse(null);
-
- if (chunkRead == null) {
- Log.error("Chunk requested not found: " + uuid);
- throw new StatusRuntimeException(Status.NOT_FOUND);
- }
-
- return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
- if (!(d instanceof ChunkData cd))
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
- return cd.getBytes();
- });
- }
-
- private int getChunkSize(String uuid) {
- return readChunk(uuid).size();
- }
-
- private void cleanupChunks(File f, Collection<String> uuids) {
- // FIXME:
- var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet();
- for (var cuuid : uuids) {
- try {
- if (inFile.contains(cuuid)) continue;
- jObjectManager.get(cuuid)
- .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
- (m, d, b, v) -> {
- m.removeRef(f.getName());
- return null;
- }));
- } catch (Exception e) {
- Log.error("Error when cleaning chunk " + cuuid, e);
- }
- }
- }
-
- @Override
- public Long write(String fileUuid, long offset, ByteString data) {
- return jObjectTxManager.executeTx(() -> {
- if (offset < 0)
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset must not be negative: " + offset));
-
- // FIXME:
- var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
- if (file == null) {
- Log.error("File not found when trying to write: " + fileUuid);
- return -1L;
- }
-
- file.rwLockNoCopy();
- try {
- file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
- // FIXME:
- if (!(file.getData() instanceof File))
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
- if (writeLogging) {
- Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " "
- + offset + " " + data.size());
- }
-
- if (size(fileUuid) < offset)
- truncate(fileUuid, offset);
-
- // FIXME: Some kind of immutable interface?
- var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
- var first = chunksAll.floorEntry(offset);
- var last = chunksAll.lowerEntry(offset + data.size());
- NavigableMap<Long, String> removedChunks = new TreeMap<>();
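- // The write replaces the chunk range overlapping [offset, offset + data.size()):
- // the overlapped chunks are removed, and any prefix of the first chunk and
- // suffix of the last chunk falling outside the write are spliced back in
- // around the new data.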
-
- long start = 0;
-
- NavigableMap<Long, String> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
- NavigableMap<Long, String> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();
-
- if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
- beforeFirst = chunksAll;
- afterLast = Collections.emptyNavigableMap();
- first = null;
- last = null;
- start = offset;
- } else if (!chunksAll.isEmpty()) {
- var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
- removedChunks.putAll(between);
- start = first.getKey();
- }
-
- ByteString pendingWrites = ByteString.empty();
-
- if (first != null && first.getKey() < offset) {
- var chunkBytes = readChunk(first.getValue());
- pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey())));
- }
- pendingWrites = pendingWrites.concat(data);
-
- if (last != null) {
- var lchunkBytes = readChunk(last.getValue());
- if (last.getKey() + lchunkBytes.size() > offset + data.size()) {
- var startInFile = offset + data.size();
- var startInChunk = startInFile - last.getKey();
- pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
- }
- }
-
- int combinedSize = pendingWrites.size();
-
- if (targetChunkSize > 0) {
- if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
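- // Merge small neighbouring chunks into this write, presumably to counteract
- // fragmentation: keep pulling in adjacent chunks while the combined size stays
- // under writeMergeLimit and each taken chunk is under writeMergeMaxChunkToTake.
- // Note that merging stops as soon as either side is done.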
- boolean leftDone = false;
- boolean rightDone = false;
- while (!leftDone && !rightDone) {
- if (beforeFirst.isEmpty()) leftDone = true;
- if (!beforeFirst.isEmpty() && !leftDone) {
- var takeLeft = beforeFirst.lastEntry();
-
- var cuuid = takeLeft.getValue();
-
- if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
- leftDone = true;
- continue;
- }
-
- if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
- leftDone = true;
- continue;
- }
-
- // FIXME: (and test this)
- beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
- start = takeLeft.getKey();
- pendingWrites = readChunk(cuuid).concat(pendingWrites);
- combinedSize += getChunkSize(cuuid);
- removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
- }
- if (afterLast.isEmpty()) rightDone = true;
- if (!afterLast.isEmpty() && !rightDone) {
- var takeRight = afterLast.firstEntry();
-
- var cuuid = takeRight.getValue();
-
- if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
- rightDone = true;
- continue;
- }
-
- if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
- rightDone = true;
- continue;
- }
-
- // FIXME: (and test this)
- afterLast = afterLast.tailMap(takeRight.getKey(), false);
- pendingWrites = pendingWrites.concat(readChunk(cuuid));
- combinedSize += getChunkSize(cuuid);
- removedChunks.put(takeRight.getKey(), takeRight.getValue());
- }
- }
- }
- }
-
- NavigableMap<Long, String> newChunks = new TreeMap<>();
-
- {
- int cur = 0;
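- // Split the combined buffer into chunks of targetChunkSize; the remainder is
- // emitted as one final chunk once it drops below targetChunkSize * writeLastChunkLimit,
- // avoiding a tiny tail chunk.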
- while (cur < combinedSize) {
- int end;
-
- if (targetChunkSize <= 0)
- end = combinedSize;
- else {
- if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
- end = Math.min(cur + targetChunkSize, combinedSize);
- } else {
- end = combinedSize;
- }
- }
-
- var thisChunk = pendingWrites.substring(cur, end);
-
- ChunkData newChunkData = createChunk(thisChunk);
- //FIXME:
- jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
- newChunks.put(start, newChunkData.getName());
-
- start += thisChunk.size();
- cur = end;
- }
- }
-
- file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));
-
- cleanupChunks(file.getData(), removedChunks.values());
- updateFileSize(file);
- } finally {
- file.rwUnlock();
- }
-
- return (long) data.size();
- });
- }
-
- @Override
- public Boolean truncate(String fileUuid, long length) {
- return jObjectTxManager.executeTx(() -> {
- if (length < 0)
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length must not be negative: " + length));
-
- var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
- if (file == null) {
- Log.error("File not found when trying to truncate: " + fileUuid);
- return false;
- }
-
- if (length == 0) {
- file.rwLockNoCopy();
- try {
- file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
-
- var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks()));
-
- file.mutate(new JMutator<>() {
- long oldMtime;
-
- @Override
- public boolean mutate(File object) {
- oldMtime = object.getMtime();
- object.getChunks().clear();
- return true;
- }
-
- @Override
- public void revert(File object) {
- object.setMtime(oldMtime);
- object.getChunks().putAll(oldChunks);
- }
- });
- cleanupChunks(file.getData(), oldChunks.values());
- updateFileSize(file);
- } catch (Exception e) {
- Log.error("Error writing file chunks: " + fileUuid, e);
- return false;
- } finally {
- file.rwUnlock();
- }
- return true;
- }
-
- file.rwLockNoCopy();
- try {
- file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
-
- var curSize = size(fileUuid);
- if (curSize == length) return true;
-
- var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
- NavigableMap<Long, String> removedChunks = new TreeMap<>();
- NavigableMap<Long, String> newChunks = new TreeMap<>();
-
- if (curSize < length) {
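- // Growing: fill the gap [curSize, length) with zero-filled chunks.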
- long combinedSize = (length - curSize);
-
- long start = curSize;
-
- // Hack
- HashMap<Long, ByteString> zeroCache = new HashMap<>();
-
- {
- long cur = 0;
- while (cur < combinedSize) {
- long end;
-
- if (targetChunkSize <= 0)
- end = combinedSize;
- else {
- if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
- end = cur + targetChunkSize;
- } else {
- end = combinedSize;
- }
- }
-
- if (!zeroCache.containsKey(end - cur))
- zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));
-
- ChunkData newChunkData = createChunk(zeroCache.get(end - cur));
- //FIXME:
- jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
- newChunks.put(start, newChunkData.getName());
-
- start += newChunkData.getSize();
- cur = end;
- }
- }
- } else {
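- // Shrinking: drop every chunk past the new length and replace the chunk
- // straddling it with just its kept prefix.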
- var tail = chunksAll.lowerEntry(length);
- var afterTail = chunksAll.tailMap(tail.getKey(), false);
-
- removedChunks.put(tail.getKey(), tail.getValue());
- removedChunks.putAll(afterTail);
-
- var tailBytes = readChunk(tail.getValue());
- var newChunk = tailBytes.substring(0, (int) (length - tail.getKey()));
-
- ChunkData newChunkData = createChunk(newChunk);
- //FIXME:
- jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
- newChunks.put(tail.getKey(), newChunkData.getName());
- }
-
- file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));
-
- cleanupChunks(file.getData(), removedChunks.values());
- updateFileSize(file);
- return true;
- } catch (Exception e) {
- Log.error("Error reading file: " + fileUuid, e);
- return false;
- } finally {
- file.rwUnlock();
- }
- });
- }
-
- @Override
- public String readlink(String uuid) {
- return jObjectTxManager.executeTx(() -> {
- return readlinkBS(uuid).toStringUtf8();
- });
- }
-
- @Override
- public ByteString readlinkBS(String uuid) {
- return jObjectTxManager.executeTx(() -> {
- var fileOpt = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
-
- return fileOpt.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
- if (!(fileData instanceof File)) {
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
- }
-
- if (!((File) fileData).isSymlink())
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid));
-
- return read(uuid, 0, Math.toIntExact(size(uuid))).get();
- });
- });
- }
-
- @Override
- public String symlink(String oldpath, String newpath) {
- return jObjectTxManager.executeTx(() -> {
- Path path = Path.of(newpath);
- var parent = getDirEntry(path.getParent().toString());
-
- ensureDir(parent);
-
- String fname = path.getFileName().toString();
-
- var fuuid = UUID.randomUUID();
- Log.debug("Creating file " + fuuid);
-
- File f = new File(fuuid, 0, true);
- var newNodeId = _tree.getNewNodeId();
- ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
-
- f.getChunks().put(0L, newChunkData.getName());
-
- jObjectManager.put(newChunkData, Optional.of(f.getName()));
- var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId));
- try {
- updateFileSize(newFile);
- } finally {
- newFile.rwUnlock();
- }
-
- _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
- return f.getName();
- });
- }
-
- @Override
- public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) {
- return jObjectTxManager.executeTx(() -> {
- var file = jObjectManager.get(fileUuid).orElseThrow(
- () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
- "File not found for setTimes: " + fileUuid))
- );
-
- file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> {
- if (fileData instanceof JKleppmannTreeNode) return null; // FIXME:
- if (!(fileData instanceof FsNode fd))
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
- bump.apply();
- fd.setMtime(mtimeMs);
- return null;
- });
-
- return true;
- });
- }
-
- @Override
- public void updateFileSize(JObject<File> file) {
- jObjectTxManager.executeTx(() -> {
- file.rwLockNoCopy();
- try {
- file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
- if (!(file.getData() instanceof File fd))
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
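- // The file size is derived from the chunk map: the offset of the last chunk
- // plus that chunk's length.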
- long realSize = 0;
-
- var last = fd.getChunks().lastEntry();
- if (last != null) {
- var lastSize = getChunkSize(last.getValue());
- realSize = last.getKey() + lastSize;
- }
-
- if (realSize != fd.getSize()) {
- long finalRealSize = realSize;
- file.mutate(new JMutator<File>() {
- long oldSize;
-
- @Override
- public boolean mutate(File object) {
- oldSize = object.getSize();
- object.setSize(finalRealSize);
- return true;
- }
-
- @Override
- public void revert(File object) {
- object.setSize(oldSize);
- }
- });
- }
- } catch (Exception e) {
- Log.error("Error updating file size: " + file.getMeta().getName(), e);
- } finally {
- file.rwUnlock();
- }
- });
- }
-
- @Override
- public Long size(String uuid) {
- return jObjectTxManager.executeTx(() -> {
- var read = jObjectManager.get(uuid)
- .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
-
- try {
- return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> {
- if (!(fileData instanceof File fd))
- throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
-
- return fd.getSize();
- });
- } catch (Exception e) {
- Log.error("Error reading file: " + uuid, e);
- return -1L;
- }
- });
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java
deleted file mode 100644
index f13096f9..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/DirectoryNotEmptyException.java
+++ /dev/null
@@ -1,8 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-public class DirectoryNotEmptyException extends RuntimeException {
- @Override
- public synchronized Throwable fillInStackTrace() {
- return this;
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java
deleted file mode 100644
index 3b31cdae..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/FileChunkMutator.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.jrepository.JMutator;
-
-import java.util.NavigableMap;
-
- public class FileChunkMutator implements JMutator<File> {
- private final long _oldTime;
- private final long _newTime;
- private final NavigableMap<Long, String> _removedChunks;
- private final NavigableMap<Long, String> _newChunks;
-
- public FileChunkMutator(long oldTime, long newTime, NavigableMap<Long, String> removedChunks, NavigableMap<Long, String> newChunks) {
- _oldTime = oldTime;
- _newTime = newTime;
- _removedChunks = removedChunks;
- _newChunks = newChunks;
- }
-
- @Override
- public boolean mutate(File object) {
- object.setMtime(_newTime);
- object.getChunks().keySet().removeAll(_removedChunks.keySet());
- object.getChunks().putAll(_newChunks);
- return true;
- }
-
- @Override
- public void revert(File object) {
- object.setMtime(_oldTime);
- object.getChunks().keySet().removeAll(_newChunks.keySet());
- object.getChunks().putAll(_removedChunks);
- }
-
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java
deleted file mode 100644
index 3240a6b4..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrRes.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java
deleted file mode 100644
index ebcd4868..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/files/service/GetattrType.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.usatiuk.dhfs.files.service;
-
-public enum GetattrType {
- FILE,
- DIRECTORY,
- SYMLINK
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java
deleted file mode 100644
index 0fa8ee29..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/DhfsFuse.java
+++ /dev/null
@@ -1,391 +0,0 @@
-package com.usatiuk.dhfs.fuse;
-
-import com.google.protobuf.UnsafeByteOperations;
-import com.sun.security.auth.module.UnixSystem;
-import com.usatiuk.dhfs.files.service.DhfsFileService;
-import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
-import com.usatiuk.dhfs.files.service.GetattrRes;
-import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
-import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
-import com.usatiuk.kleppmanntree.AlreadyExistsException;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import jakarta.inject.Inject;
-import jnr.ffi.Pointer;
-import org.apache.commons.lang3.SystemUtils;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-import ru.serce.jnrfuse.ErrorCodes;
-import ru.serce.jnrfuse.FuseFillDir;
-import ru.serce.jnrfuse.FuseStubFS;
-import ru.serce.jnrfuse.struct.FileStat;
-import ru.serce.jnrfuse.struct.FuseFileInfo;
-import ru.serce.jnrfuse.struct.Statvfs;
-import ru.serce.jnrfuse.struct.Timespec;
-
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Optional;
-
-import static jnr.posix.FileStat.*;
-
-@ApplicationScoped
-public class DhfsFuse extends FuseStubFS {
- private static final int blksize = 1048576;
- private static final int iosize = 1048576;
- @Inject
- ObjectPersistentStore persistentStore; // FIXME?
- @ConfigProperty(name = "dhfs.fuse.root")
- String root;
- @ConfigProperty(name = "dhfs.fuse.enabled")
- boolean enabled;
- @ConfigProperty(name = "dhfs.fuse.debug")
- Boolean debug;
- @ConfigProperty(name = "dhfs.files.target_chunk_size")
- int targetChunkSize;
- @Inject
- JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
- @Inject
- DhfsFileService fileService;
-
- void init(@Observes @Priority(100000) StartupEvent event) {
- if (!enabled) return;
- Paths.get(root).toFile().mkdirs();
- Log.info("Mounting with root " + root);
-
- var uid = new UnixSystem().getUid();
- var gid = new UnixSystem().getGid();
-
- var opts = new ArrayList<String>();
-
- // Assuming macFUSE
- if (SystemUtils.IS_OS_MAC) {
- opts.add("-o");
- opts.add("iosize=" + iosize);
- } else if (SystemUtils.IS_OS_LINUX) {
- // FIXME: There's something else missing: the writes still seem to be 32k max
-// opts.add("-o");
-// opts.add("large_read");
- opts.add("-o");
- opts.add("big_writes");
- opts.add("-o");
- opts.add("max_read=" + iosize);
- opts.add("-o");
- opts.add("max_write=" + iosize);
- }
- opts.add("-o");
- opts.add("auto_cache");
- opts.add("-o");
- opts.add("uid=" + uid);
- opts.add("-o");
- opts.add("gid=" + gid);
-
- mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
- }
-
- void shutdown(@Observes @Priority(1) ShutdownEvent event) {
- if (!enabled) return;
- Log.info("Unmounting");
- umount();
- Log.info("Unmounted");
- }
-
- @Override
- public int statfs(String path, Statvfs stbuf) {
- try {
- stbuf.f_frsize.set(blksize);
- stbuf.f_bsize.set(blksize);
- stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system
- stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs
- stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs
- stbuf.f_files.set(1000); //FIXME:
- stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME:
- stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME:
- stbuf.f_namemax.set(2048);
- return super.statfs(path, stbuf);
- } catch (Exception e) {
- Log.error("When statfs " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int getattr(String path, FileStat stat) {
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var uuid = fileOpt.get();
- Optional<GetattrRes> found = fileService.getattr(uuid);
- if (found.isEmpty()) {
- return -ErrorCodes.ENOENT();
- }
- switch (found.get().type()) {
- case FILE -> {
- stat.st_mode.set(S_IFREG | found.get().mode());
- stat.st_nlink.set(1);
- stat.st_size.set(fileService.size(uuid));
- }
- case DIRECTORY -> {
- stat.st_mode.set(S_IFDIR | found.get().mode());
- stat.st_nlink.set(2);
- }
- case SYMLINK -> {
- stat.st_mode.set(S_IFLNK | 0777);
- stat.st_nlink.set(1);
- stat.st_size.set(fileService.size(uuid));
- }
- }
-
- // FIXME: Race?
- stat.st_ctim.tv_sec.set(found.get().ctime() / 1000);
- stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000);
- stat.st_mtim.tv_sec.set(found.get().mtime() / 1000);
- stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
- stat.st_atim.tv_sec.set(found.get().mtime() / 1000);
- stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
- stat.st_blksize.set(blksize);
- } catch (Throwable e) {
- Log.error("When getattr " + path, e);
- return -ErrorCodes.EIO();
- }
- return 0;
- }
-
- @Override
- public int utimens(String path, Timespec[] timespec) {
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var file = fileOpt.get();
- var res = fileService.setTimes(file,
- timespec[0].tv_sec.get() * 1000,
- timespec[1].tv_sec.get() * 1000);
- if (!res) return -ErrorCodes.EINVAL();
- else return 0;
- } catch (Exception e) {
- Log.error("When utimens " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int open(String path, FuseFileInfo fi) {
- try {
- if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT();
- return 0;
- } catch (Exception e) {
- Log.error("When open " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
- if (size < 0) return -ErrorCodes.EINVAL();
- if (offset < 0) return -ErrorCodes.EINVAL();
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var file = fileOpt.get();
- var read = fileService.read(fileOpt.get(), offset, (int) size);
- if (read.isEmpty()) return 0;
- UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
- return read.get().size();
- } catch (Exception e) {
- Log.error("When reading " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
- if (offset < 0) return -ErrorCodes.EINVAL();
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);
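- // Copy the native FUSE buffer into an uninitialized direct buffer, then hand
- // it to the file service wrapped without a further copy.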
-
- jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
- buf.address(),
- jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
- size
- );
-
- var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
- return written.intValue();
- } catch (Exception e) {
- Log.error("When writing " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int create(String path, long mode, FuseFileInfo fi) {
- try {
- var ret = fileService.create(path, mode);
- if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
- else return 0;
- } catch (Exception e) {
- Log.error("When creating " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int mkdir(String path, long mode) {
- try {
- fileService.mkdir(path, mode);
- return 0;
- } catch (AlreadyExistsException aex) {
- return -ErrorCodes.EEXIST();
- } catch (Exception e) {
- Log.error("When creating dir " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int rmdir(String path) {
- try {
- fileService.unlink(path);
- return 0;
- } catch (DirectoryNotEmptyException ex) {
- return -ErrorCodes.ENOTEMPTY();
- } catch (Exception e) {
- Log.error("When removing dir " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int rename(String path, String newName) {
- try {
- var ret = fileService.rename(path, newName);
- if (!ret) return -ErrorCodes.ENOENT();
- else return 0;
- } catch (Exception e) {
- Log.error("When renaming " + path, e);
- return -ErrorCodes.EIO();
- }
-
- }
-
- @Override
- public int unlink(String path) {
- try {
- fileService.unlink(path);
- return 0;
- } catch (Exception e) {
- Log.error("When unlinking " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int truncate(String path, long size) {
- if (size < 0) return -ErrorCodes.EINVAL();
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var file = fileOpt.get();
- var ok = fileService.truncate(file, size);
- if (ok)
- return 0;
- else
- return -ErrorCodes.ENOSPC();
- } catch (Exception e) {
- Log.error("When truncating " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int chmod(String path, long mode) {
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var ret = fileService.chmod(fileOpt.get(), mode);
- if (ret) return 0;
- else return -ErrorCodes.EINVAL();
- } catch (Exception e) {
- Log.error("When chmod " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) {
- try {
- Iterable<String> found;
- try {
- found = fileService.readDir(path);
- } catch (StatusRuntimeException e) {
- if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
- return -ErrorCodes.ENOENT();
- else throw e;
- }
-
- filler.apply(buf, ".", null, 0);
- filler.apply(buf, "..", null, 0);
-
- for (var c : found) {
- filler.apply(buf, c, null, 0);
- }
-
- return 0;
- } catch (Exception e) {
- Log.error("When readdir " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int readlink(String path, Pointer buf, long size) {
- if (size < 0) return -ErrorCodes.EINVAL();
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- var file = fileOpt.get();
- var read = fileService.readlinkBS(fileOpt.get());
- if (read.isEmpty()) return 0;
- UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
- buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
- return 0;
- } catch (Exception e) {
- Log.error("When reading " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int chown(String path, long uid, long gid) {
- try {
- var fileOpt = fileService.open(path);
- if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
- return 0;
- } catch (Exception e) {
- Log.error("When chown " + path, e);
- return -ErrorCodes.EIO();
- }
- }
-
- @Override
- public int symlink(String oldpath, String newpath) {
- try {
- var ret = fileService.symlink(oldpath, newpath);
- if (ret == null) return -ErrorCodes.EEXIST();
- else return 0;
- } catch (Exception e) {
- Log.error("When creating " + newpath, e);
- return -ErrorCodes.EIO();
- }
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java
deleted file mode 100644
index d2790516..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutput.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package com.usatiuk.dhfs.fuse;
-
-import com.google.protobuf.ByteOutput;
-import jnr.ffi.Pointer;
-
-import java.nio.ByteBuffer;
-import java.nio.MappedByteBuffer;
-
-public class JnrPtrByteOutput extends ByteOutput {
- private final Pointer _backing;
- private final long _size;
- private final JnrPtrByteOutputAccessors _accessors;
- private long _pos;
-
- public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
- _backing = backing;
- _size = size;
- _pos = 0;
- _accessors = accessors;
- }
-
- @Override
- public void write(byte value) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void write(byte[] value, int offset, int length) {
- if (length + _pos > _size) throw new IndexOutOfBoundsException();
- _backing.put(_pos, value, offset, length);
- _pos += length;
- }
-
- @Override
- public void writeLazy(byte[] value, int offset, int length) {
- if (length + _pos > _size) throw new IndexOutOfBoundsException();
- _backing.put(_pos, value, offset, length);
- _pos += length;
- }
-
- @Override
- public void write(ByteBuffer value) {
- var rem = value.remaining();
- if (rem + _pos > _size) throw new IndexOutOfBoundsException();
-
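- // Direct buffers are copied address-to-address via Unsafe; heap buffers are
- // rejected, as the JNR pointer target requires a native source address.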
- if (value.isDirect()) {
- if (value instanceof MappedByteBuffer mb) {
- mb.load();
- }
- long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
- var out = _backing.address() + _pos;
- _accessors.getUnsafe().copyMemory(addr, out, rem);
- } else {
- throw new UnsupportedOperationException();
- }
-
- _pos += rem;
- }
-
- @Override
- public void writeLazy(ByteBuffer value) {
- write(value);
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java
deleted file mode 100644
index 78cc8ff4..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/fuse/JnrPtrByteOutputAccessors.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package com.usatiuk.dhfs.fuse;
-
-import jakarta.inject.Singleton;
-import jdk.internal.access.JavaNioAccess;
-import jdk.internal.access.SharedSecrets;
-import lombok.Getter;
-import sun.misc.Unsafe;
-
-import java.lang.reflect.Field;
-
-@Singleton
-class JnrPtrByteOutputAccessors {
- @Getter
- JavaNioAccess _nioAccess;
- @Getter
- Unsafe _unsafe;
-
- JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
- _nioAccess = SharedSecrets.getJavaNioAccess();
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- _unsafe = (Unsafe) f.get(null);
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
deleted file mode 100644
index 2743bf48..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeManager.java
+++ /dev/null
@@ -1,566 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.files.objects.File;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*;
-import com.usatiuk.dhfs.objects.jrepository.*;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpObject;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
-import com.usatiuk.dhfs.objects.repository.opsupport.OpSender;
-import com.usatiuk.kleppmanntree.*;
-import com.usatiuk.dhfs.utils.VoidFn;
-import io.quarkus.logging.Log;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.tuple.Pair;
-
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Function;
-
-@ApplicationScoped
-public class JKleppmannTreeManager {
- private static final String dataFileName = "trees";
- private final ConcurrentHashMap<String, JKleppmannTree> _trees = new ConcurrentHashMap<>();
- @Inject
- JKleppmannTreePeerInterface jKleppmannTreePeerInterface;
- @Inject
- OpSender opSender;
- @Inject
- OpObjectRegistry opObjectRegistry;
- @Inject
- JObjectManager jObjectManager;
- @Inject
- PersistentPeerDataService persistentPeerDataService;
- @Inject
- JObjectTxManager jObjectTxManager;
- @Inject
- SoftJObjectFactory softJObjectFactory;
- @Inject
- JKleppmannTreePeerInterface peerInterface;
-
- public JKleppmannTree getTree(String name) {
- return _trees.computeIfAbsent(name, this::createTree);
- }
-
- private JKleppmannTree createTree(String name) {
- return jObjectTxManager.executeTx(() -> {
- var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null);
- if (data == null) {
- data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty());
- }
- var tree = new JKleppmannTree(name);
- opObjectRegistry.registerObject(tree);
- return tree;
- });
- }
-
- public class JKleppmannTree implements OpObject {
- private final KleppmannTree<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> _tree;
-
- private final SoftJObject<JKleppmannTreePersistentData> _persistentData;
-
- private final JKleppmannTreeStorageInterface _storageInterface;
- private final JKleppmannTreeClock _clock;
-
- private final String _treeName;
-
- JKleppmannTree(String treeName) {
- _treeName = treeName;
-
- _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName));
-
- _storageInterface = new JKleppmannTreeStorageInterface();
- _clock = new JKleppmannTreeClock();
-
- _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder());
- }
-
- public String traverse(List<String> names) {
- return _tree.traverse(names);
- }
-
- public String getNewNodeId() {
- return _storageInterface.getNewNodeId();
- }
-
- public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) {
- _tree.move(newParent, newMeta, node);
- }
-
- public void trash(JKleppmannTreeNodeMeta newMeta, String node) {
- _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node);
- }
-
- @Override
- public boolean hasPendingOpsForHost(UUID host) {
- return _persistentData.get()
- .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
- (m, d) -> d.getQueues().containsKey(host) &&
- !d.getQueues().get(host).isEmpty()
- );
- }
-
- @Override
- public List<Op> getPendingOpsForHost(UUID host, int limit) {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- if (d.getQueues().containsKey(host)) {
- var queue = d.getQueues().get(host);
- ArrayList<Op> collected = new ArrayList<>();
-
- for (var node : queue.entrySet()) {
- collected.add(new JKleppmannTreeOpWrapper(node.getValue()));
- if (collected.size() >= limit) break;
- }
-
- return collected;
- }
- return List.of();
- });
- }
-
- @Override
- public String getId() {
- return _treeName;
- }
-
- @Override
- public void commitOpForHost(UUID host, Op op) {
- if (!(op instanceof JKleppmannTreeOpWrapper jop))
- throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
- var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue();
- if (!Objects.equals(jop.getOp(), got))
- throw new IllegalArgumentException("Committed op push was not the oldest");
-
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.getQueues().get(host).pollFirstEntry();
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp());
- }
- });
-
- }
-
- @Override
- public void pushBootstrap(UUID host) {
- _tree.recordBoostrapFor(host);
- }
-
- public Pair<String, String> findParent(Function<JKleppmannTreeNodeWrapper, Boolean> predicate) {
- return _tree.findParent(predicate);
- }
-
- @Override
- public boolean acceptExternalOp(UUID from, Op op) {
- if (op instanceof JKleppmannTreePeriodicPushOp pushOp) {
- return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp());
- }
-
- if (!(op instanceof JKleppmannTreeOpWrapper jop))
- throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());
-
- JObject<File> fileRef;
- if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
- var fino = f.getFileIno();
- fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId()));
- } else {
- fileRef = null;
- }
-
- if (Log.isTraceEnabled())
- Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName());
-
- try {
- _tree.applyExternalOp(from, jop.getOp());
- } catch (Exception e) {
- Log.error("Error applying external op", e);
- throw e;
- } finally {
- // FIXME:
- // Fixup the ref if it didn't really get applied
-
- if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile))
- Log.error("Could not create child of pushed op: " + jop.getOp());
-
- if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
- if (fileRef != null) {
- var got = jObjectManager.get(jop.getOp().childId()).orElse(null);
-
- VoidFn remove = () -> {
- fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
- m.removeRef(jop.getOp().childId());
- });
- };
-
- if (got == null) {
- remove.apply();
- } else {
- try {
- got.rLock();
- try {
- got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno()))
- remove.apply();
- } finally {
- got.rUnlock();
- }
- } catch (DeletedObjectAccessException dex) {
- remove.apply();
- }
- }
- }
- }
- }
- return true;
- }
-
- @Override
- public Op getPeriodicPushOp() {
- return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp());
- }
-
- @Override
- public void addToTx() {
- // FIXME: a hack
- _persistentData.get().rwLockNoCopy();
- _persistentData.get().rwUnlock();
- }
-
- private class JOpRecorder implements OpRecorder {
- @Override
- public void recordOp(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- var hostUuids = persistentPeerDataService.getHostUuids().stream().toList();
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.recordOp(hostUuids, op);
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.removeOp(hostUuids, op);
- }
- });
- opSender.push(JKleppmannTree.this);
- }
-
- @Override
- public void recordOpForPeer(UUID peer, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.recordOp(peer, op);
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.removeOp(peer, op);
- }
- });
- opSender.push(JKleppmannTree.this);
- }
- }
-
- private class JKleppmannTreeClock implements Clock<Long> {
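- // The tree's logical (Lamport-style) clock, persisted in
- // JKleppmannTreePersistentData: every tick is recorded as a revertible
- // mutation, so rolling back a transaction also rolls back the clock.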
- @Override
- public Long getTimestamp() {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1;
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.getClock().getTimestamp();
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.getClock().ungetTimestamp();
- }
- });
- return ret;
- }
-
- @Override
- public Long peekTimestamp() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp());
- }
-
- @Override
- public Long updateTimestamp(Long receivedTimestamp) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- Long _old;
-
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- _old = object.getClock().updateTimestamp(receivedTimestamp);
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.getClock().setTimestamp(_old);
- }
- });
- return _persistentData.get().getData().getClock().peekTimestamp();
- }
- }
-
- public class JKleppmannTreeStorageInterface implements StorageInterface<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> {
- private final LogWrapper _logWrapper = new LogWrapper();
- private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper();
-
- public JKleppmannTreeStorageInterface() {
- if (jObjectManager.get(getRootId()).isEmpty()) {
- putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))));
- putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null)));
- }
- }
-
- public JObject<JKleppmannTreeNode> putNode(JKleppmannTreeNode node) {
- return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent()));
- }
-
- public JObject<JKleppmannTreeNode> putNodeLocked(JKleppmannTreeNode node) {
- return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent()));
- }
-
- @Override
- public String getRootId() {
- return _treeName + "_jt_root";
- }
-
- @Override
- public String getTrashId() {
- return _treeName + "_jt_trash";
- }
-
- @Override
- public String getNewNodeId() {
- return persistentPeerDataService.getUniqueId();
- }
-
- @Override
- public JKleppmannTreeNodeWrapper getById(String id) {
- var got = jObjectManager.get(id);
- if (got.isEmpty()) return null;
- return new JKleppmannTreeNodeWrapper((JObject<JKleppmannTreeNode>) got.get());
- }
-
- @Override
- public JKleppmannTreeNodeWrapper createNewNode(TreeNode node) {
- return new JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node)));
- }
-
- @Override
- public void removeNode(String id) {}
-
- @Override
- public LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> getLog() {
- return _logWrapper;
- }
-
- @Override
- public PeerTimestampLogInterface<Long, UUID> getPeerTimestampLog() {
- return _peerLogWrapper;
- }
-
- @Override
- public void rLock() {
- _persistentData.get().rLock();
- }
-
- @Override
- public void rUnlock() {
- _persistentData.get().rUnlock();
- }
-
- @Override
- public void rwLock() {
- _persistentData.get().rwLockNoCopy();
- }
-
- @Override
- public void rwUnlock() {
- _persistentData.get().rwUnlock();
- }
-
- @Override
- public void assertRwLock() {
- _persistentData.get().assertRwLock();
- }
-
- private class PeerLogWrapper implements PeerTimestampLogInterface<Long, UUID> {
-
- @Override
- public Long getForPeer(UUID peerId) {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
- (m, d) -> d.getPeerTimestampLog().get(peerId));
- }
-
- @Override
- public void putForPeer(UUID peerId, Long timestamp) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- Long old;
-
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- old = object.getPeerTimestampLog().put(peerId, timestamp);
- return !Objects.equals(old, timestamp);
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- if (old != null)
- object.getPeerTimestampLog().put(peerId, old);
- else
- object.getPeerTimestampLog().remove(peerId, timestamp);
- }
- });
- }
- }
-
- private class LogWrapper implements LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> {
- @Override
- public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekOldest() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- var ret = d.getLog().firstEntry();
- if (ret == null) return null;
- return Pair.of(ret);
- });
- }
-
- @Override
- public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> takeOldest() {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
- var ret = _persistentData.get().getData().getLog().firstEntry();
- if (ret != null)
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.getLog().pollFirstEntry();
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.getLog().put(ret.getKey(), ret.getValue());
- }
- });
- return Pair.of(ret);
- }
-
- @Override
- public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekNewest() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- var ret = d.getLog().lastEntry();
- if (ret == null) return null;
- return Pair.of(ret);
- });
- }
-
- @Override
- public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> newestSlice(CombinedTimestamp<Long, UUID> since, boolean inclusive) {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- var tail = d.getLog().tailMap(since, inclusive);
- return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
- });
- }
-
- @Override
- public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> getAll() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
- });
- }
-
- @Override
- public boolean isEmpty() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- return d.getLog().isEmpty();
- });
- }
-
- @Override
- public boolean containsKey(CombinedTimestamp<Long, UUID> timestamp) {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- return d.getLog().containsKey(timestamp);
- });
- }
-
- @Override
- public long size() {
- return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
- return (long) d.getLog().size();
- });
- }
-
- @Override
- public void put(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- if (_persistentData.get().getData().getLog().containsKey(timestamp))
- throw new IllegalStateException("Overwriting log entry?");
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- object.getLog().put(timestamp, record);
- return true;
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- object.getLog().remove(timestamp, record);
- }
- });
- }
-
- @Override
- public void replace(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
- _persistentData.get().assertRwLock();
- _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
- LogRecord old;
-
- @Override
- public boolean mutate(JKleppmannTreePersistentData object) {
- old = object.getLog().put(timestamp, record);
- return !Objects.equals(old, record);
- }
-
- @Override
- public void revert(JKleppmannTreePersistentData object) {
- if (old != null)
- object.getLog().put(timestamp, old);
- else
- object.getLog().remove(timestamp, record);
- }
- });
- }
- }
- }
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java
deleted file mode 100644
index cd4b09c9..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeNodeWrapper.java
+++ /dev/null
@@ -1,71 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jrepository.JObject;
-import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
-import com.usatiuk.kleppmanntree.TreeNode;
-import com.usatiuk.kleppmanntree.TreeNodeWrapper;
-
-import java.util.UUID;
-
-public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper {
- private final JObject<JKleppmannTreeNode> _backing;
-
- public JKleppmannTreeNodeWrapper(JObject<JKleppmannTreeNode> backing) {_backing = backing;}
-
- @Override
- public void rLock() {
- _backing.rLock();
- }
-
- @Override
- public void rUnlock() {
- _backing.rUnlock();
- }
-
- @Override
- public void rwLock() {
- _backing.rwLock();
- }
-
- @Override
- public void rwUnlock() {
- _backing.bumpVer(); // FIXME:?
- _backing.rwUnlock();
- }
-
- @Override
- public void freeze() {
- _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
- m.freeze();
- return null;
- });
- }
-
- @Override
- public void unfreeze() {
- _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
- m.unfreeze();
- return null;
- });
- }
-
- @Override
- public void notifyRef(String id) {
- _backing.getMeta().addRef(id);
- }
-
- @Override
- public void notifyRmRef(String id) {
- _backing.getMeta().removeRef(id);
- }
-
- @Override
- public TreeNode getNode() {
- _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- if (_backing.getData() == null)
- throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!");
- return _backing.getData().getNode();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java
deleted file mode 100644
index 4612f8fc..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreeOpWrapper.java
+++ /dev/null
@@ -1,30 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import com.usatiuk.kleppmanntree.OpMove;
-import lombok.Getter;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-// Wrapper to avoid having to specify generic types
-public class JKleppmannTreeOpWrapper implements Op {
- @Getter
- private final OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> _op;
-
- public JKleppmannTreeOpWrapper(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
- if (op == null) throw new IllegalArgumentException("op shouldn't be null");
- _op = op;
- }
-
- @Override
- public Collection<String> getEscapedRefs() {
- if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) {
- return List.of(mf.getFileIno());
- }
- return List.of();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java
deleted file mode 100644
index 39b5d484..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeerInterface.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.kleppmanntree.PeerInterface;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.Collection;
-import java.util.UUID;
-
-@Singleton
- public class JKleppmannTreePeerInterface implements PeerInterface<UUID> {
- @Inject
- PersistentPeerDataService persistentPeerDataService;
-
- @Override
- public UUID getSelfId() {
- return persistentPeerDataService.getSelfUuid();
- }
-
- @Override
- public Collection<UUID> getAllPeers() {
- return persistentPeerDataService.getHostUuidsAndSelf();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java
deleted file mode 100644
index 3c84d067..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/JKleppmannTreePeriodicPushOp.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree;
-
-import com.usatiuk.dhfs.objects.repository.opsupport.Op;
-import lombok.Getter;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-public class JKleppmannTreePeriodicPushOp implements Op {
- @Getter
- private final UUID _from;
- @Getter
- private final long _timestamp;
-
- public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) {
- _from = from;
- _timestamp = timestamp;
- }
-
- @Override
- public Collection<String> getEscapedRefs() {
- return List.of();
- }
-}
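This op carries only a peer id and a timestamp; in Kleppmann's move-tree design, periodically pushing your clock lets other replicas advance their per-peer timestamp logs (and so trim their op logs) even when you originate no moves. A sketch of the receive-side clock ratchet such a push would feed, assuming a Lamport-style clock (the `LamportClock` class is a hypothetical stand-in, not the project's `AtomicClock`):

```java
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical Lamport-style clock: local events increment, received
// timestamps ratchet the clock forward to max(local, seen) + 1.
class LamportClock {
    private final AtomicLong time = new AtomicLong(1);

    long tick() {
        return time.incrementAndGet();
    }

    void receiveTimestamp(long seen) {
        time.updateAndGet(cur -> Math.max(cur, seen) + 1);
    }

    long peek() {
        return time.get();
    }
}
```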
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java
deleted file mode 100644
index 7c9f13da..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeLogEffectSerializer.java
+++ /dev/null
@@ -1,53 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.kleppmanntree.LogEffect;
-import com.usatiuk.kleppmanntree.LogEffectOld;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreeLogEffectSerializer implements ProtoSerializer<JKleppmannTreeOpLogEffectP, LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String>> {
- @Inject
- ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
- @Inject
- ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
-
- @Override
- public LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String> deserialize(JKleppmannTreeOpLogEffectP message) {
- return new LogEffect<>(
- message.hasOldParent() ? new LogEffectOld<>(
- opProtoSerializer.deserialize(message.getOldEffectiveMove()).getOp(),
- message.getOldParent(),
- metaProtoSerializer.deserialize(message.getOldMeta())
- ) : null,
- opProtoSerializer.deserialize(message.getEffectiveOp()).getOp(),
- message.getNewParentId(),
- metaProtoSerializer.deserialize(message.getNewMeta()),
- message.getSelfId()
- );
- }
-
- @Override
- public JKleppmannTreeOpLogEffectP serialize(LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String> object) {
- var builder = JKleppmannTreeOpLogEffectP.newBuilder();
- // FIXME: all these wrappers
- if (object.oldInfo() != null) {
- builder.setOldEffectiveMove(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.oldInfo().oldEffectiveMove())));
- builder.setOldParent(object.oldInfo().oldParent());
- builder.setOldMeta(metaProtoSerializer.serialize(object.oldInfo().oldMeta()));
- }
- builder.setEffectiveOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.effectiveOp())));
- builder.setNewParentId(object.newParentId());
- builder.setNewMeta(metaProtoSerializer.serialize(object.newMeta()));
- builder.setSelfId(object.childId());
- return builder.build();
- }
-}
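Serializer pairs like this are easiest to validate as a round-trip property: `deserialize(serialize(x))` should reproduce `x`. A self-contained sketch of that check against a toy codec (the `Codec`/`StringCodec` types are hypothetical stand-ins for the `ProtoSerializer` contract, which really targets protobuf messages):

```java
// Minimal stand-in for the ProtoSerializer<MessageT, ObjectT> contract.
interface Codec<M, O> {
    O deserialize(M message);
    M serialize(O object);
}

record Point(int x, int y) {}

// Toy codec encoding a Point as "x,y".
class StringCodec implements Codec<String, Point> {
    @Override
    public Point deserialize(String message) {
        var parts = message.split(",");
        return new Point(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]));
    }

    @Override
    public String serialize(Point object) {
        return object.x() + "," + object.y();
    }
}

class RoundTripCheck {
    public static void main(String[] args) { // run with -ea to enable asserts
        var codec = new StringCodec();
        var p = new Point(3, 4);
        // The property every serializer pair in this package should satisfy:
        assert codec.deserialize(codec.serialize(p)).equals(p);
        System.out.println("round-trip ok");
    }
}
```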
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java
deleted file mode 100644
index 8e0e36f6..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeNodeProtoSerializer.java
+++ /dev/null
@@ -1,56 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.kleppmanntree.TreeNode;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreeNodeProtoSerializer implements ProtoSerializer<JKleppmannTreeNodeP, JKleppmannTreeNode> {
- @Inject
- ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
- @Inject
- ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
-
- @Override
- public JKleppmannTreeNode deserialize(JKleppmannTreeNodeP message) {
- var children = new HashMap<String, String>();
- message.getChildrenList().forEach(child -> children.put(child.getKey(), child.getValue()));
- var node = new TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String>(
- message.getId(),
- message.hasParent() ? message.getParent() : null,
- message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
- children
- );
- if (message.hasLastEffectiveOp())
- node.setLastEffectiveOp((opProtoSerializer.deserialize(message.getLastEffectiveOp())).getOp());
- return new JKleppmannTreeNode(node);
- }
-
- @Override
- public JKleppmannTreeNodeP serialize(JKleppmannTreeNode object) {
- var builder = JKleppmannTreeNodeP.newBuilder().setId(object.getNode().getId());
- if (object.getNode().getParent() != null)
- builder.setParent(object.getNode().getParent());
- if (object.getNode().getMeta() != null) {
- builder.setMeta(metaProtoSerializer.serialize(object.getNode().getMeta()));
- }
- if (object.getNode().getLastEffectiveOp() != null)
- builder.setLastEffectiveOp(
- opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.getNode().getLastEffectiveOp()))
- );
- object.getNode().getChildren().forEach((k, v) -> {
- builder.addChildrenBuilder().setKey(k).setValue(v);
- });
- return builder.build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java
deleted file mode 100644
index 4e7c8c43..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreeOpProtoSerializer.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.kleppmanntree.CombinedTimestamp;
-import com.usatiuk.kleppmanntree.OpMove;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreeOpProtoSerializer implements ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> {
- @Inject
- ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
-
- @Override
- public JKleppmannTreeOpWrapper deserialize(JKleppmannTreeOpP message) {
- return new JKleppmannTreeOpWrapper(new OpMove<>(
- new CombinedTimestamp<>(message.getTimestamp(), UUID.fromString(message.getPeer())), message.getNewParentId(),
- message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
- message.getChild()
- ));
- }
-
- @Override
- public JKleppmannTreeOpP serialize(JKleppmannTreeOpWrapper object) {
- var builder = JKleppmannTreeOpP.newBuilder();
- builder.setTimestamp(object.getOp().timestamp().timestamp())
- .setPeer(object.getOp().timestamp().nodeId().toString())
- .setNewParentId(object.getOp().newParentId())
- .setChild(object.getOp().childId());
- if (object.getOp().newMeta() != null)
- builder.setMeta(metaProtoSerializer.serialize(object.getOp().newMeta()));
- return builder.build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java
deleted file mode 100644
index 24bd6a66..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePeriodicPushOpProtoSerializer.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreePeriodicPushOp;
-import com.usatiuk.dhfs.objects.repository.JKleppmannTreePeriodicPushOpP;
-import jakarta.inject.Singleton;
-
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreePeriodicPushOpProtoSerializer implements ProtoSerializer<JKleppmannTreePeriodicPushOpP, JKleppmannTreePeriodicPushOp> {
-
- @Override
- public JKleppmannTreePeriodicPushOp deserialize(JKleppmannTreePeriodicPushOpP message) {
- return new JKleppmannTreePeriodicPushOp(UUID.fromString(message.getFromUuid()), message.getTimestamp());
- }
-
- @Override
- public JKleppmannTreePeriodicPushOpP serialize(JKleppmannTreePeriodicPushOp object) {
- return JKleppmannTreePeriodicPushOpP.newBuilder().setTimestamp(object.getTimestamp()).setFromUuid(object.getFrom().toString()).build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java
deleted file mode 100644
index 75cdab5b..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/serializers/JKleppmannTreePersistentDataProtoSerializer.java
+++ /dev/null
@@ -1,86 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
-import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreePersistentDataP;
-import com.usatiuk.kleppmanntree.*;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-
-import java.util.HashMap;
-import java.util.TreeMap;
-import java.util.UUID;
-
-@Singleton
-public class JKleppmannTreePersistentDataProtoSerializer implements ProtoSerializer<JKleppmannTreePersistentDataP, JKleppmannTreePersistentData> {
- @Inject
- ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
- @Inject
- ProtoSerializer<JKleppmannTreeOpLogEffectP, LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String>> effectProtoSerializer;
-
- @Override
- public JKleppmannTreePersistentData deserialize(JKleppmannTreePersistentDataP message) {
- HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues = new HashMap<>();
-
- for (var q : message.getQueuesList()) {
- var qmap = new TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>();
- for (var o : q.getEntriesList()) {
- var op = (JKleppmannTreeOpWrapper) opProtoSerializer.deserialize(o.getOp());
- qmap.put(new CombinedTimestamp<>(o.getClock(), UUID.fromString(o.getUuid())), op.getOp());
- }
- queues.put(UUID.fromString(q.getNode()), qmap);
- }
-
- var log = new HashMap<UUID, Long>();
-
- for (var l : message.getPeerLogList()) {
- log.put(UUID.fromString(l.getHost()), l.getTimestamp());
- }
-
- var opLog = new TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>();
- for (var l : message.getOpLogList()) {
- opLog.put(new CombinedTimestamp<>(l.getClock(), UUID.fromString(l.getUuid())),
- new LogRecord<>(opProtoSerializer.deserialize(l.getOp()).getOp(), l.getEffectsList().stream().map(effectProtoSerializer::deserialize).toList())
- );
- }
-
- return new JKleppmannTreePersistentData(
- message.getTreeName(),
- new AtomicClock(message.getClock()),
- queues,
- log,
- opLog
- );
- }
-
- @Override
- public JKleppmannTreePersistentDataP serialize(JKleppmannTreePersistentData object) {
- var builder = JKleppmannTreePersistentDataP.newBuilder()
- .setTreeName(object.getTreeName())
- .setClock(object.getClock().peekTimestamp());
- for (var q : object.getQueues().entrySet()) {
- if (q.getValue().isEmpty()) continue;
- var qb = builder.addQueuesBuilder();
- qb.setNode(q.getKey().toString());
- for (var e : q.getValue().entrySet()) {
- qb.addEntriesBuilder().setClock(e.getKey().timestamp()).setUuid(e.getKey().nodeId().toString())
- .setOp((JKleppmannTreeOpP) opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue())));
- }
- }
- for (var peerLogEntry : object.getPeerTimestampLog().entrySet()) {
- builder.addPeerLogBuilder().setHost(peerLogEntry.getKey().toString()).setTimestamp(peerLogEntry.getValue());
- }
- for (var e : object.getLog().entrySet()) {
- builder.addOpLogBuilder()
- .setClock(e.getKey().timestamp())
- .setUuid(e.getKey().nodeId().toString())
- .setOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue().op())))
- .addAllEffects(e.getValue().effects().stream().map(effectProtoSerializer::serialize).toList());
- }
- return builder.build();
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
deleted file mode 100644
index 0146da88..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNode.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.kleppmanntree.TreeNode;
-import lombok.Getter;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-// FIXME: Ideally this is two classes?
-@OnlyLocal
-public class JKleppmannTreeNode extends JObjectData {
- @Getter
- final TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> _node;
-
- public JKleppmannTreeNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
- _node = node;
- }
-
- @Override
- public String getName() {
- return _node.getId();
- }
-
- @Override
- public Class<? extends ConflictResolver> getConflictResolver() {
- return null;
- }
-
- @Override
- public Collection<String> extractRefs() {
- if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile)
- return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno());
- return Collections.unmodifiableCollection(_node.getChildren().values());
- }
-
- @Override
- public Class<? extends JObjectData> getRefType() {
- return JObjectData.class;
- }
-}
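`extractRefs` is what plugs tree nodes into the reference-counted object graph: a file node pins the file object it names, any other node pins its children. A self-contained sketch of that dispatch with toy types (`Node`, `Meta`, and friends are hypothetical, not the real classes):

```java
import java.util.Collection;
import java.util.List;
import java.util.Map;

sealed interface Meta permits FileMeta, DirMeta {}

record FileMeta(String name, String fileIno) implements Meta {}

record DirMeta(String name) implements Meta {}

record Node(String id, Meta meta, Map<String, String> children) {
    // Mirrors JKleppmannTreeNode.extractRefs: a file node references its
    // inode object, everything else references its child node ids.
    Collection<String> extractRefs() {
        if (meta instanceof FileMeta f) return List.of(f.fileIno());
        return List.copyOf(children.values());
    }
}

class RefsDemo {
    public static void main(String[] args) {
        var file = new Node("n1", new FileMeta("a.txt", "ino-42"), Map.of());
        var dir = new Node("n2", new DirMeta("docs"), Map.of("a.txt", "n1"));
        System.out.println(file.extractRefs()); // [ino-42]
        System.out.println(dir.extractRefs());  // [n1]
    }
}
```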
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
deleted file mode 100644
index 2ea7d27f..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMeta.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
-import com.usatiuk.kleppmanntree.NodeMeta;
-import lombok.Getter;
-
-import java.util.Objects;
-
-@ProtoMirror(JKleppmannTreeNodeMetaP.class)
-public abstract class JKleppmannTreeNodeMeta implements NodeMeta {
- @Getter
- private final String _name;
-
- public JKleppmannTreeNodeMeta(String name) {_name = name;}
-
- public abstract JKleppmannTreeNodeMeta withName(String name);
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- JKleppmannTreeNodeMeta that = (JKleppmannTreeNodeMeta) o;
- return Objects.equals(_name, that._name);
- }
-
- @Override
- public int hashCode() {
- return Objects.hashCode(_name);
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
deleted file mode 100644
index 79882017..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaDirectory.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP;
-
-@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class)
-public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta {
- public JKleppmannTreeNodeMetaDirectory(String name) {
- super(name);
- }
-
- @Override
- public JKleppmannTreeNodeMeta withName(String name) {
- return new JKleppmannTreeNodeMetaDirectory(name);
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java
deleted file mode 100644
index 124cd51d..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreeNodeMetaFile.java
+++ /dev/null
@@ -1,37 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP;
-import lombok.Getter;
-
-import java.util.Objects;
-
-@ProtoMirror(JKleppmannTreeNodeMetaFileP.class)
-public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta {
- @Getter
- private final String _fileIno;
-
- public JKleppmannTreeNodeMetaFile(String name, String fileIno) {
- super(name);
- _fileIno = fileIno;
- }
-
- @Override
- public JKleppmannTreeNodeMeta withName(String name) {
- return new JKleppmannTreeNodeMetaFile(name, _fileIno);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- if (!super.equals(o)) return false;
- JKleppmannTreeNodeMetaFile that = (JKleppmannTreeNodeMetaFile) o;
- return Objects.equals(_fileIno, that._fileIno);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(super.hashCode(), _fileIno);
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
deleted file mode 100644
index d6881d5b..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jkleppmanntree/structs/JKleppmannTreePersistentData.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package com.usatiuk.dhfs.objects.jkleppmanntree.structs;
-
-import com.usatiuk.dhfs.objects.jrepository.JObjectData;
-import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-import com.usatiuk.kleppmanntree.AtomicClock;
-import com.usatiuk.kleppmanntree.CombinedTimestamp;
-import com.usatiuk.kleppmanntree.LogRecord;
-import com.usatiuk.kleppmanntree.OpMove;
-import lombok.Getter;
-
-import java.util.*;
-
-@OnlyLocal
-public class JKleppmannTreePersistentData extends JObjectData {
- private final String _treeName;
- @Getter
- private final AtomicClock _clock;
- @Getter
- private final HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> _queues;
- @Getter
- private final HashMap<UUID, Long> _peerTimestampLog;
- @Getter
- private final TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> _log;
-
- public JKleppmannTreePersistentData(String treeName, AtomicClock clock,
- HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues,
- HashMap<UUID, Long> peerTimestampLog, TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> log) {
- _treeName = treeName;
- _clock = clock;
- _queues = queues;
- _peerTimestampLog = peerTimestampLog;
- _log = log;
- }
-
- public JKleppmannTreePersistentData(String treeName) {
- _treeName = treeName;
- _clock = new AtomicClock(1);
- _queues = new HashMap<>();
- _peerTimestampLog = new HashMap<>();
- _log = new TreeMap<>();
- }
-
- public static String nameFromTreeName(String treeName) {
- return treeName + "_pd";
- }
-
- public void recordOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
- _queues.computeIfAbsent(host, h -> new TreeMap<>());
- _queues.get(host).put(opMove.timestamp(), opMove);
- }
-
- public void removeOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
- _queues.get(host).remove(opMove.timestamp(), opMove);
- }
-
- public void recordOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
- for (var u : hosts) {
- recordOp(u, opMove);
- }
- }
-
- public void removeOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
- for (var u : hosts) {
- removeOp(u, opMove);
- }
- }
-
-
- @Override
- public String getName() {
- return nameFromTreeName(_treeName);
- }
-
- public String getTreeName() {
- return _treeName;
- }
-
- @Override
- public Class<? extends ConflictResolver> getConflictResolver() {
- return null;
- }
-
- @Override
- public Collection<String> extractRefs() {
- return List.of();
- }
-}
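The `_queues` field is the interesting structure here: one ordered map of pending moves per destination peer, keyed by (clock, origin) so retransmission happens in timestamp order, and `removeOp` uses the two-argument `Map.remove` so only the exact acknowledged entry is dropped. A reduced, self-contained sketch of that bookkeeping (`Ts` and `Op` are hypothetical stand-ins for `CombinedTimestamp` and `OpMove`):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;

// (clock, origin) pairs order ops; origin breaks clock ties deterministically.
record Ts(long clock, UUID node) implements Comparable<Ts> {
    public int compareTo(Ts o) {
        int c = Long.compare(clock, o.clock);
        return c != 0 ? c : node.compareTo(o.node);
    }
}

record Op(Ts timestamp, String child) {}

class PendingQueues {
    private final Map<UUID, TreeMap<Ts, Op>> queues = new HashMap<>();

    // Same shape as recordOp: one timestamp-ordered queue per peer.
    void recordOp(UUID host, Op op) {
        queues.computeIfAbsent(host, h -> new TreeMap<>()).put(op.timestamp(), op);
    }

    // remove(key, value) drops the entry only if it is still this exact op.
    void removeOp(UUID host, Op op) {
        var q = queues.get(host);
        if (q != null) q.remove(op.timestamp(), op);
    }

    public static void main(String[] args) {
        var self = UUID.randomUUID();
        var peer = UUID.randomUUID();
        var q = new PendingQueues();
        var op = new Op(new Ts(1, self), "child-id");
        q.recordOp(peer, op);
        q.removeOp(peer, op); // acknowledged: the queue entry is gone
    }
}
```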
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java
deleted file mode 100644
index 47e026c8..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/AssumedUnique.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface AssumedUnique {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java
deleted file mode 100644
index 5557adc2..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/DeletedObjectAccessException.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public class DeletedObjectAccessException extends RuntimeException {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java
deleted file mode 100644
index 4f0a1be7..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JMutator.java
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public interface JMutator<T extends JObjectData> {
- boolean mutate(T object);
-
- void revert(T object);
-}
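`JMutator` is an undoable mutation: `mutate` reports whether anything actually changed (so callers can skip the version bump on no-ops), and `revert` must restore the exact prior state if the transaction rolls back. A self-contained sketch against a toy object (`Counter` and `AddMutator` are hypothetical):

```java
interface Mutator<T> {
    boolean mutate(T object);
    void revert(T object);
}

class Counter {
    long value;
}

// An undoable increment: carries enough state to undo itself exactly.
class AddMutator implements Mutator<Counter> {
    private final long delta;

    AddMutator(long delta) {
        this.delta = delta;
    }

    @Override
    public boolean mutate(Counter c) {
        if (delta == 0) return false; // report no-op; caller skips version bump
        c.value += delta;
        return true;
    }

    @Override
    public void revert(Counter c) {
        c.value -= delta; // exact inverse of mutate
    }
}
```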
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java
deleted file mode 100644
index 1d0a9ca0..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObject.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.utils.VoidFn;
-
-public abstract class JObject<T extends JObjectData> {
- public abstract ObjectMetadata getMeta();
-
- public abstract T getData();
-
- abstract void rollback(ObjectMetadata meta, JObjectData data);
-
- public abstract <R> R runReadLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnRead<T, R> fn);
-
- // Note: this is expensive
- public abstract <R> R runWriteLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWrite<T, R> fn);
-
- public void runReadLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnReadVoid<T> fn) {
- runReadLocked(resolutionStrategy, (m, d) -> {
- fn.apply(m, d);
- return null;
- });
- }
-
- public void runWriteLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWriteVoid<T> fn) {
- runWriteLocked(resolutionStrategy, (m, d, b, v) -> {
- fn.apply(m, d, b, v);
- return null;
- });
- }
-
- public <X extends JObjectData> JObject<? extends X> as(Class<X> klass) {
- if (klass.isAssignableFrom(getMeta().getKnownClass())) return (JObject<? extends X>) this;
- throw new IllegalStateException("Class mismatch for " + getMeta().getName() + " got: " + getMeta().getKnownClass());
- }
-
- public JObject<T> local() {
- tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
- if (getData() == null)
- throw new IllegalStateException("Data missing for " + getMeta().getName());
- return this;
- }
-
- public JObject<T> remote() {
- tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
- if (getData() == null)
- throw new IllegalStateException("Data missing for " + getMeta().getName());
- return this;
- }
-
- public abstract void mutate(JMutator<? super T> mutator);
-
- public abstract boolean tryResolve(JObjectManager.ResolutionStrategy resolutionStrategy);
-
- public abstract void externalResolution(JObjectData data);
-
- public abstract void rwLock();
-
- public abstract boolean tryRwLock();
-
- public abstract void rwLockNoCopy();
-
- public abstract void rwUnlock();
-
- public abstract void drop();
-
- abstract boolean haveRwLock();
-
- public abstract void assertRwLock();
-
- public abstract void doDelete();
-
- public abstract void markSeen();
-
- public abstract void rLock();
-
- public abstract void rUnlock();
-
- public abstract void bumpVer();
-
- public abstract void commitFence();
-
- public abstract void commitFenceAsync(VoidFn callback);
-
- public abstract int estimateSize();
-
- abstract boolean updateDeletionState();
-}
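The `runReadLocked`/`runWriteLocked` shape keeps lock discipline inside the object: callers hand over a function of (metadata, data) and never see the lock. A reduced sketch of the idiom over a plain `ReentrantReadWriteLock` (toy `LockedBox` type; the real class also layers resolution, transactions, and deletion checks on top):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiFunction;

class LockedBox<M, D> {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final M meta;
    private D data;

    LockedBox(M meta, D data) {
        this.meta = meta;
        this.data = data;
    }

    // Callers never touch the lock directly, mirroring JObject.runReadLocked.
    <R> R runReadLocked(BiFunction<M, D, R> fn) {
        lock.readLock().lock();
        try {
            return fn.apply(meta, data);
        } finally {
            lock.readLock().unlock();
        }
    }

    <R> R runWriteLocked(BiFunction<M, D, R> fn) {
        lock.writeLock().lock();
        try {
            return fn.apply(meta, data);
        } finally {
            lock.writeLock().unlock();
        }
    }
}
```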
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java
deleted file mode 100644
index 9afa248e..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectData.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoMirror;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.repository.ConflictResolver;
-
-import java.util.Collection;
-import java.util.List;
-
-@ProtoMirror(JObjectDataP.class)
-public abstract class JObjectData {
- public abstract String getName();
-
- public Class<? extends ConflictResolver> getConflictResolver() {
- throw new UnsupportedOperationException();
- }
-
- public Class<? extends JObjectData> getRefType() {
- throw new UnsupportedOperationException("This object shouldn't have refs");
- }
-
- public Collection<String> extractRefs() {
- return List.of();
- }
-
- public int estimateSize() {
- return 0;
- }
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java
deleted file mode 100644
index 41558e57..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectKey.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-public record JObjectKey(short type) {
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java
deleted file mode 100644
index 4194a807..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectLRU.java
+++ /dev/null
@@ -1,95 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Shutdown;
-import io.quarkus.runtime.Startup;
-import jakarta.enterprise.context.ApplicationScoped;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.util.LinkedHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-@ApplicationScoped
-public class JObjectLRU {
- private final LinkedHashMap<JObject<?>, Long> _cache = new LinkedHashMap<>();
- @ConfigProperty(name = "dhfs.objects.lru.limit")
- long sizeLimit;
- @ConfigProperty(name = "dhfs.objects.lru.print-stats")
- boolean printStats;
- private long _curSize = 0;
- private long _evict = 0;
- private ExecutorService _statusExecutor = null;
-
- @Startup
- void init() {
- if (printStats) {
- _statusExecutor = Executors.newSingleThreadExecutor();
- _statusExecutor.submit(() -> {
- try {
- while (true) {
- Thread.sleep(10000);
- if (_curSize > 0)
- Log.info("Cache status: size="
- + _curSize / 1024 / 1024 + "MB"
- + " evicted=" + _evict);
- _evict = 0;
- if (Log.isTraceEnabled()) {
- long realSize = 0;
- synchronized (_cache) {
- for (JObject<?> object : _cache.keySet()) {
- realSize += object.estimateSize();
- }
- Log.info("Cache status: real size="
- + realSize / 1024 / 1024 + "MB" + " entries=" + _cache.size());
- }
- }
- }
- } catch (InterruptedException ignored) {
- }
- });
- }
- }
-
- @Shutdown
- void shutdown() {
- if (_statusExecutor != null)
- _statusExecutor.shutdownNow();
- }
-
- public void notifyAccess(JObject<?> obj) {
- if (obj.getData() == null) return;
- long size = obj.estimateSize();
- synchronized (_cache) {
- _curSize += size;
- var old = _cache.putLast(obj, size);
- if (old != null)
- _curSize -= old;
-
- while (_curSize >= sizeLimit) {
- var del = _cache.pollFirstEntry();
- _curSize -= del.getValue();
- _evict++;
- }
- }
- }
-
- public void updateSize(JObject<?> obj) {
- long size = obj.estimateSize();
- synchronized (_cache) {
- var old = _cache.replace(obj, size);
- if (old != null) {
- _curSize += size;
- _curSize -= old;
- } else {
- return;
- }
-
- while (_curSize >= sizeLimit) {
- var del = _cache.pollFirstEntry();
- _curSize -= del.getValue();
- _evict++;
- }
- }
- }
-}
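The cache above leans on Java 21's `SequencedMap` additions to `LinkedHashMap`: `putLast` moves an entry to the most-recently-used end, `pollFirstEntry` evicts the least-recently-used one, and a running byte total (not an entry count) enforces the limit. The core loop, reduced to a self-contained sketch:

```java
import java.util.LinkedHashMap;
import java.util.Map;

class SizeBoundedLru<K> {
    private final LinkedHashMap<K, Long> cache = new LinkedHashMap<>();
    private final long sizeLimit;
    private long curSize = 0;

    SizeBoundedLru(long sizeLimit) {
        this.sizeLimit = sizeLimit;
    }

    // Mirrors notifyAccess: re-append at the tail, adjust the running total,
    // then evict from the head until back under the limit.
    synchronized void notifyAccess(K key, long size) {
        curSize += size;
        Long old = cache.putLast(key, size);
        if (old != null) curSize -= old;
        while (curSize >= sizeLimit) {
            Map.Entry<K, Long> evicted = cache.pollFirstEntry();
            if (evicted == null) break; // everything evicted already
            curSize -= evicted.getValue();
        }
    }
}
```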
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java
deleted file mode 100644
index 377c9533..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManager.java
+++ /dev/null
@@ -1,63 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.utils.VoidFn;
-import jakarta.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.Optional;
-
-public interface JObjectManager {
- // FIXME:
- void runWriteListeners(JObject<?> obj, boolean metaChanged, boolean dataChanged);
-
- void registerWriteListener(Class<? extends JObjectData> klass, WriteListenerFn fn);
-
- void registerMetaWriteListener(Class<? extends JObjectData> klass, WriteListenerFn fn);
-
- Optional<JObject<?>> get(String name);
-
- Collection<String> findAll();
-
- // Put a new object
- <T extends JObjectData> JObject<T> put(T object, Optional<String> parent);
-
- <T extends JObjectData> JObject<T> putLocked(T object, Optional<String> parent);
-
- // Get an object with a name if it exists, otherwise create new one based on metadata
- // Should be used when working with objects referenced from the outside
- JObject<?> getOrPut(String name, Class<? extends JObjectData> klass, Optional<String> parent);
-
- JObject<?> getOrPutLocked(String name, Class<? extends JObjectData> klass, Optional<String> parent);
-
- enum ResolutionStrategy {
- NO_RESOLUTION,
- LOCAL_ONLY,
- REMOTE
- }
-
- @FunctionalInterface
- interface WriteListenerFn {
- void apply(JObject<?> obj);
- }
-
- @FunctionalInterface
- interface ObjectFnRead<T extends JObjectData, R> {
- R apply(ObjectMetadata meta, @Nullable T data);
- }
-
- @FunctionalInterface
- interface ObjectFnWrite<T extends JObjectData, R> {
- R apply(ObjectMetadata indexData, @Nullable T data, VoidFn bump, VoidFn invalidate);
- }
-
- @FunctionalInterface
- interface ObjectFnReadVoid<T extends JObjectData> {
- void apply(ObjectMetadata meta, @Nullable T data);
- }
-
- @FunctionalInterface
- interface ObjectFnWriteVoid<T extends JObjectData> {
- void apply(ObjectMetadata indexData, @Nullable T data, VoidFn bump, VoidFn invalidate);
- }
-
-}
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java
deleted file mode 100644
index 5cd3e2ce..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectManagerImpl.java
+++ /dev/null
@@ -1,795 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
-import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
-import com.usatiuk.dhfs.utils.VoidFn;
-import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.Shutdown;
-import io.quarkus.runtime.Startup;
-import jakarta.inject.Inject;
-import jakarta.inject.Singleton;
-import lombok.Getter;
-import org.apache.commons.collections4.MultiValuedMap;
-import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-@Singleton
-public class JObjectManagerImpl implements JObjectManager {
- private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _writeListeners
- = new ArrayListValuedHashMap<>();
- private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _metaWriteListeners
- = new ArrayListValuedHashMap<>();
- private final ConcurrentHashMap<String, NamedWeakReference> _map = new ConcurrentHashMap<>();
- private final ReferenceQueue<JObjectImpl<?>> _refQueue = new ReferenceQueue<>();
- @Inject
- ObjectPersistentStore objectPersistentStore;
- @Inject
- RemoteObjectServiceClient remoteObjectServiceClient;
- @Inject
- InvalidationQueueService invalidationQueueService;
- @Inject
- PersistentPeerDataService persistentPeerDataService;
- @Inject
- JObjectRefProcessor jObjectRefProcessor;
- @Inject
- SoftJObjectFactory softJObjectFactory;
- @Inject
- JObjectLRU jObjectLRU;
- @Inject
- JObjectTxManager jObjectTxManager;
- @Inject
- TxWriteback txWriteback;
-
- @Inject
- ProtoSerializer metaProtoSerializer;
- @Inject
- ProtoSerializer dataProtoSerializer;
-
- @ConfigProperty(name = "dhfs.objects.ref_verification")
- boolean refVerification;
- @ConfigProperty(name = "dhfs.objects.lock_timeout_secs")
- int lockTimeoutSecs;
- private Thread _refCleanupThread;
-
- @Override
- public void runWriteListeners(JObject<?> obj, boolean metaChanged, boolean dataChanged) {
- if (metaChanged)
- for (var t : _metaWriteListeners.keySet()) { // FIXME:?
- if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
- for (var cb : _metaWriteListeners.get(t))
- cb.apply(obj);
- }
- if (dataChanged)
- for (var t : _writeListeners.keySet()) { // FIXME:?
- if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
- for (var cb : _writeListeners.get(t))
- cb.apply(obj);
- }
- }
-
- @Override
- public void registerWriteListener(Class<? extends JObjectData> klass, WriteListenerFn fn) {
- _writeListeners.put(klass, fn);
- }
-
- @Override
- public void registerMetaWriteListener(Class<? extends JObjectData> klass, WriteListenerFn fn) {
- _metaWriteListeners.put(klass, fn);
- }
-
- @Startup
- void init() {
- _refCleanupThread = new Thread(this::refCleanupThread);
- _refCleanupThread.setName("JObject ref cleanup thread");
- _refCleanupThread.start();
- }
-
- @Shutdown
- void shutdown() throws InterruptedException {
- _refCleanupThread.interrupt();
- _refCleanupThread.join();
- }
-
- private void refCleanupThread() {
- try {
- while (!Thread.interrupted()) {
- NamedWeakReference cur = (NamedWeakReference) _refQueue.remove();
- _map.remove(cur._key, cur);
- }
- } catch (InterruptedException ignored) {
- }
- Log.info("Ref cleanup thread exiting");
- }
-
- private JObjectImpl<?> getFromMap(String key) {
- var ret = _map.get(key);
- if (ret != null && ret.get() != null) {
- return ret.get();
- }
- return null;
- }
-
- @Override
- public Optional<JObject<?>> get(String name) {
- {
- var inMap = getFromMap(name);
- if (inMap != null) {
- jObjectLRU.notifyAccess(inMap);
- return Optional.of(inMap);
- }
- }
-
- ObjectMetadataP readMd;
- try {
- readMd = objectPersistentStore.readObjectMeta(name);
- } catch (StatusRuntimeException ex) {
- if (ex.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
- return Optional.empty();
- throw ex;
- }
- var meta = metaProtoSerializer.deserialize(readMd);
- if (!(meta instanceof ObjectMetadata))
- throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Unexpected metadata type for " + name));
-
- if (((ObjectMetadata) meta).isDeleted()) {
- Log.warn("Deleted meta on disk for " + name);
- return Optional.empty();
- }
-
- JObjectImpl<?> ret = null;
- var newObj = new JObjectImpl<>((ObjectMetadata) meta);
- while (ret == null) {
- var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(newObj, _refQueue));
- if (ref.get() == null) _map.remove(name, ref);
- else ret = ref.get();
- }
- jObjectLRU.notifyAccess(ret);
- return Optional.of(ret);
- }
-
- @Override
- public Collection<String> findAll() {
- var out = _map.values().stream().map(WeakReference::get)
- .filter(Objects::nonNull)
- .map(JObjectImpl::getMeta).map(ObjectMetadata::getName)
- .collect(Collectors.toCollection((Supplier<LinkedHashSet<String>>) LinkedHashSet::new));
- out.addAll(objectPersistentStore.findAllObjects());
- return out;
- }
-
- public <D extends JObjectData> JObjectImpl<D> putImpl(D object, Optional<String> parent, boolean lock) {
- while (true) {
- JObjectImpl<?> ret;
- JObjectImpl<?> newObj = null;
- try {
- ret = getFromMap(object.getName());
- if (ret != null) {
- if (!object.getClass().isAnnotationPresent(AssumedUnique.class))
- throw new IllegalArgumentException("Trying to insert different object with same key");
- } else {
- newObj = new JObjectImpl(object.getName(), persistentPeerDataService.getSelfUuid(), object);
- newObj.rwLock();
- while (ret == null) {
- JObjectImpl<?> finalNewObj = newObj;
- var ref = _map.computeIfAbsent(object.getName(), k -> new NamedWeakReference(finalNewObj, _refQueue));
- if (ref.get() == null) _map.remove(object.getName(), ref);
- else ret = ref.get();
- }
- if (ret != newObj) {
- newObj.drop();
- continue;
- }
- }
- JObjectImpl<D> finalRet = (JObjectImpl<D>) ret;
-
- boolean shouldWrite = false;
- try {
- shouldWrite = ret.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
- return (object.getClass().isAnnotationPresent(PushResolution.class)
- && object.getClass().isAnnotationPresent(AssumedUnique.class)
- && finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy())
- || (parent.isEmpty() && !m.isFrozen()) || (parent.isPresent() && !m.checkRef(parent.get()));
- });
- } catch (DeletedObjectAccessException dex) {
- shouldWrite = true;
- }
-
- if (shouldWrite)
- ret.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
- if (object.getClass().isAnnotationPresent(PushResolution.class)
- && object.getClass().isAnnotationPresent(AssumedUnique.class)
- && finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy()) {
- finalRet.externalResolution(object);
- }
-
- if (parent.isPresent()) {
- m.addRef(parent.get());
- if (m.isFrozen())
- m.unfreeze();
- } else {
- m.freeze();
- }
-
- return null;
- });
- } finally {
- // FIXME?
- if (newObj != null)
- newObj.forceInvalidate();
- }
- if (newObj == null) {
- jObjectLRU.notifyAccess(ret);
- if (lock)
- ret.rwLock();
- }
- if (newObj != null && !lock)
- newObj.rwUnlock();
- return (JObjectImpl<D>) ret;
- }
- }
-
- @Override
- public <D extends JObjectData> JObjectImpl<D> putLocked(D object, Optional<String> parent) {
- return putImpl(object, parent, true);
- }
-
- @Override
- public <D extends JObjectData> JObjectImpl<D> put(D object, Optional<String> parent) {
- return putImpl(object, parent, false);
- }
-
- public JObject<?> getOrPutImpl(String name, Class<? extends JObjectData> klass, Optional<String> parent, boolean lock) {
- while (true) {
- var got = get(name).orElse(null);
-
- if (got != null) {
- {
- boolean shouldWrite = false;
- try {
- // These two mutate in one direction only, it's ok to not take the lock
- var gotKlass = got.getMeta().getKnownClass();
- var gotSeen = got.getMeta().isSeen();
- shouldWrite
- = !(((gotKlass.equals(klass))
- || (klass.isAssignableFrom(gotKlass)))
- && gotSeen);
- } catch (DeletedObjectAccessException dex) {
- shouldWrite = true;
- }
- if (shouldWrite || lock) {
- got.rwLock();
- try {
- var meta = got.getMeta();
- meta.narrowClass(klass);
- meta.markSeen();
- } finally {
- if (!lock) got.rwUnlock();
- }
- }
- }
-
- parent.ifPresent(s -> {
- boolean shouldWrite = false;
- try {
- shouldWrite = !got.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> m.checkRef(s));
- } catch (DeletedObjectAccessException dex) {
- shouldWrite = true;
- }
-
- if (!shouldWrite) return;
-
- got.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
- if (m.isFrozen())
- m.unfreeze();
- m.addRef(s);
- return true;
- });
- });
- return got;
- }
-
- JObjectImpl<?> ret = null;
- var created = new JObjectImpl<>(new ObjectMetadata(name, false, klass));
- created.rwLock();
- while (ret == null) {
- var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(created, _refQueue));
- if (ref.get() == null) _map.remove(name, ref);
- else ret = ref.get();
- }
- if (ret != created) {
- created.drop();
- continue;
- }
-
- created.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
- parent.ifPresent(m::addRef);
- m.markSeen();
- return null;
- });
- if (!lock)
- created.rwUnlock();
- return created;
- }
- }
-
- @Override
- public JObject<?> getOrPutLocked(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
- return getOrPutImpl(name, klass, parent, true);
- }
-
- @Override
- public JObject<?> getOrPut(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
- return getOrPutImpl(name, klass, parent, false);
- }
-
- private static class NamedWeakReference extends WeakReference<JObjectImpl<?>> {
- @Getter
- final String _key;
-
- public NamedWeakReference(JObjectImpl<?> target, ReferenceQueue<JObjectImpl<?>> q) {
- super(target, q);
- this._key = target.getMeta().getName();
- }
- }
-
- public class JObjectImpl<T extends JObjectData> extends JObject<T> {
- private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
- private final AtomicReference<T> _dataPart = new AtomicReference<>();
- private ObjectMetadata _metaPart;
-
- // Create a new object
- protected JObjectImpl(String name, UUID selfUuid, T obj) {
- _metaPart = new ObjectMetadata(name, false, obj.getClass());
- _metaPart.setHaveLocalCopy(true);
- _dataPart.set(obj);
- _metaPart.getChangelog().put(selfUuid, 1L);
- if (Log.isTraceEnabled())
- Log.trace("new JObject: " + getMeta().getName());
- }
-
- // Create an object from existing metadata
- protected JObjectImpl(ObjectMetadata objectMetadata) {
- _metaPart = objectMetadata;
- if (Log.isTraceEnabled())
- Log.trace("new JObject (ext): " + getMeta().getName());
- }
-
- @Override
- public T getData() {
- return _dataPart.get();
- }
-
- @Override
- void rollback(ObjectMetadata meta, JObjectData data) {
- _metaPart = meta;
- _dataPart.set((T) data);
- }
-
- @Override
- public ObjectMetadata getMeta() {
- return _metaPart;
- }
-
- @Override
- public void markSeen() {
- if (!_metaPart.isSeen()) {
- runWriteLocked(ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
- m.markSeen();
- return null;
- });
- }
- }
-
- private void tryRemoteResolve() {
- if (_dataPart.get() == null) {
- rwLock();
- try {
- tryLocalResolve();
- if (_dataPart.get() == null) {
- var res = resolveDataRemote();
- _metaPart.narrowClass(res.getClass());
- _dataPart.set((T) res);
- _metaPart.setHaveLocalCopy(true);
- hydrateRefs();
- } // _dataPart.get() == null
- } finally {
- rwUnlock();
- } // try
- } // _dataPart.get() == null
- }
-
- private void tryLocalResolve() {
- if (_dataPart.get() == null) {
- rLock();
- try {
- if (_dataPart.get() == null) {
- if (!getMeta().isHaveLocalCopy()) return;
- JObjectData res;
- try {
- res = resolveDataLocal();
- } catch (Exception e) {
- Log.error("Object " + _metaPart.getName() + " data couldn't be read but it should exist locally!", e);
- return;
- }
-
- if (_metaPart.getSavedRefs() != null && !_metaPart.getSavedRefs().isEmpty())
- throw new IllegalStateException("Object " + _metaPart.getName() + " has non-hydrated refs when written locally");
-
- _metaPart.narrowClass(res.getClass());
- if (_dataPart.compareAndSet(null, (T) res))
- onResolution();
- } // _dataPart.get() == null
- } finally {
- rUnlock();
- } // try
- } // _dataPart.get() == null
- }
-
- @Override
- public void externalResolution(JObjectData data) {
- assertRwLock();
- if (Log.isTraceEnabled())
- Log.trace("External resolution of " + getMeta().getName());
- if (_dataPart.get() != null)
- throw new IllegalStateException("Data is not null when recording external resolution of " + getMeta().getName());
- if (!data.getClass().isAnnotationPresent(PushResolution.class))
- throw new IllegalStateException("Expected external resolution only for classes with pushResolution " + getMeta().getName());
- _metaPart.narrowClass(data.getClass());
- _dataPart.set((T) data);
- _metaPart.setHaveLocalCopy(true);
- hydrateRefs();
- }
-
- public boolean tryRLock() {
- try {
- if (!_lock.readLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
- return false;
- if (_metaPart.isDeleted()) {
- _lock.readLock().unlock();
- throw new DeletedObjectAccessException();
- }
- return true;
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- }
-
- boolean tryRwLockImpl(boolean block, boolean txCopy) {
- try {
- if (block) {
- if (!_lock.writeLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
- return false;
- } else {
- if (!_lock.writeLock().tryLock())
- return false;
- }
- try {
- // TODO: Fix putImpl
-// if (_metaPart.isDeleted())
-// throw new DeletedObjectAccessException();
-
- if (_lock.writeLock().getHoldCount() == 1) {
- jObjectTxManager.addToTx(this, txCopy);
- }
- } catch (Throwable t) {
- _lock.writeLock().unlock();
- throw t;
- }
- return true;
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public void rwLock() {
- if (!tryRwLockImpl(true, true))
- throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
- }
-
- @Override
- public boolean tryRwLock() {
- return tryRwLockImpl(false, true);
- }
-
- @Override
- public void rwLockNoCopy() {
- if (!tryRwLockImpl(true, false))
- throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
- }
-
- public void rLock() {
- if (!tryRLock())
- throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire read lock for " + getMeta().getName()));
- }
-
- public void rUnlock() {
- _lock.readLock().unlock();
- }
-
- protected void forceInvalidate() {
- assertRwLock();
- jObjectTxManager.forceInvalidate(this);
- }
-
- public void rwUnlock() {
- int hc = _lock.writeLock().getHoldCount();
-
- _lock.writeLock().unlock();
-
- // FIXME: this relies on the transaction running
- if (hc == 2) {
- updateDeletionState();
- }
- }
-
- @Override
- public void drop() {
- if (_lock.writeLock().getHoldCount() < 2) {
- throw new IllegalStateException("Expected for object to be locked and in transaction");
- }
- _lock.writeLock().unlock();
- jObjectTxManager.drop(this);
- }
-
- public boolean haveRwLock() {
- return _lock.isWriteLockedByCurrentThread();
- }
-
- @Override
- public void assertRwLock() {
- if (!haveRwLock())
- throw new IllegalStateException("Expected to be write-locked there: " + getMeta().getName() + " " + Thread.currentThread().getName());
- }
-
- @Override
- public <R> R runReadLocked(ResolutionStrategy resolutionStrategy, ObjectFnRead<T, R> fn) {
- tryResolve(resolutionStrategy);
-
- rLock();
- try {
- return fn.apply(_metaPart, _dataPart.get());
- } finally {
- rUnlock();
- }
- }
-
- protected boolean isResolved() {
- return _dataPart.get() != null;
- }
-
- @Override
- public <R> R runWriteLocked(ResolutionStrategy resolutionStrategy, ObjectFnWrite<T, R> fn) {
- rwLock();
- try {
- tryResolve(resolutionStrategy);
- VoidFn invalidateFn = () -> {
- tryLocalResolve();
- backupRefs();
- _dataPart.set(null);
- removeLocal(_metaPart.getName());
- };
- return fn.apply(_metaPart, _dataPart.get(), this::bumpVer, invalidateFn);
- } finally {
- rwUnlock();
- }
- }
-
- @Override
- public void mutate(JMutator<? super T> mutator) {
- assertRwLock();
-
- if (getData() == null) throw new IllegalStateException("Resolve before mutate!");
-
- if (mutator.mutate(getData())) {
- bumpVer();
- jObjectTxManager.addMutator(this, mutator);
- }
- }
-
- public boolean tryResolve(ResolutionStrategy resolutionStrategy) {
- if (resolutionStrategy == ResolutionStrategy.LOCAL_ONLY ||
- resolutionStrategy == ResolutionStrategy.REMOTE)
- tryLocalResolve();
- if (resolutionStrategy == ResolutionStrategy.REMOTE) tryRemoteResolve();
-
- return _dataPart.get() != null;
- }
-
- @Override
- public void doDelete() {
- assertRwLock();
- getMeta().markDeleted();
- _dataPart.set(null);
- _metaPart.setHaveLocalCopy(false);
- _metaPart.setSavedRefs(new HashSet<>());
- }
-
- public void backupRefs() {
- assertRwLock();
- if (getData() != null) {
- if ((getMeta().getSavedRefs() != null) && (!getMeta().getSavedRefs().isEmpty())) {
- Log.error("Saved refs not empty for " + getMeta().getName() + " will clean");
- getMeta().setSavedRefs(null);
- }
- getMeta().setSavedRefs(new LinkedHashSet<>(getData().extractRefs()));
- }
- }
-
- public void hydrateRefs() {
- assertRwLock();
- if (getMeta().getSavedRefs() != null) {
- StringBuilder sb = new StringBuilder();
- sb.append("Hydrating refs for ").append(getMeta().getName()).append("\n");
- sb.append("Saved refs: ");
- getMeta().getSavedRefs().forEach(r -> sb.append(r).append(" "));
- sb.append("\nExtracted refs: ");
- var extracted = new LinkedHashSet<>(getData().extractRefs());
- extracted.forEach(r -> sb.append(r).append(" "));
- Log.debug(sb.toString());
- for (var r : getMeta().getSavedRefs()) {
- if (!extracted.contains(r))
- get(r).ifPresent(ro -> ro.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
- m.removeRef(getMeta().getName());
- return null;
- }));
- }
- for (var r : extracted) {
- if (!getMeta().getSavedRefs().contains(r)) {
- Log.trace("Hydrating ref " + r + " for " + getMeta().getName());
- getOrPut(r, getData().getRefType(), Optional.of(getMeta().getName()));
- }
- }
- getMeta().setSavedRefs(null);
- }
- }
-
- @Override
- public boolean updateDeletionState() {
- assertRwLock();
-
- if (!getMeta().isDeletionCandidate() && getMeta().isDeleted()) {
- getMeta().undelete();
- Log.debug("Undelete: " + getMeta().getName());
-
- Stream<String> refs = Stream.empty();
-
- if (getMeta().getSavedRefs() != null)
- refs = getMeta().getSavedRefs().stream();
- if (getData() != null)
- refs = Stream.concat(refs, getData().extractRefs().stream());
-
- refs.forEach(r -> {
- Log.trace("Hydrating ref after undelete " + r + " for " + getMeta().getName());
- getOrPut(r, getData() != null ? getData().getRefType() : JObjectData.class, Optional.of(getMeta().getName()));
- });
-
- }
-
- if (getMeta().isDeletionCandidate() && !getMeta().isDeleted()) {
- if (!getMeta().isSeen())
- tryQuickDelete();
- else
- jObjectRefProcessor.putDeletionCandidate(getMeta().getName());
- return true;
- }
- return false;
- }
-
- private void quickDeleteRef(String name) {
- var got = get(name).orElse(null);
- if (got == null) return;
- if (got.tryRwLock()) {
- try {
- got.getMeta().removeRef(getMeta().getName());
- } finally {
- got.rwUnlock();
- }
- } else {
- jObjectRefProcessor.putQuickDeletionCandidate(softJObjectFactory.create(JObjectData.class, this), softJObjectFactory.create(JObjectData.class, got));
- }
- }
-
- private void tryQuickDelete() {
- assertRwLock();
- if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class))
- tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
- if (Log.isTraceEnabled())
- Log.trace("Quick delete of: " + getMeta().getName());
-
- Collection<String> extracted = null;
- if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && getData() != null)
- extracted = getData().extractRefs();
- Collection<String> saved = getMeta().getSavedRefs();
-
- doDelete();
-
- if (saved != null)
- for (var r : saved) quickDeleteRef(r);
- if (extracted != null)
- for (var r : extracted) quickDeleteRef(r);
- }
-
- public T resolveDataLocal() {
- // jObject.assertRwLock();
- // FIXME: No way to assert read lock?
- return (T) dataProtoSerializer.deserialize(objectPersistentStore.readObject(getMeta().getName()));
- }
-
- public T resolveDataRemote() {
- var obj = remoteObjectServiceClient.getObject(this);
- invalidationQueueService.pushInvalidationToAll(this);
- return (T) dataProtoSerializer.deserialize(obj);
- }
-
- // Really more like "onUpdateSize"
- // Also not called from tryResolveRemote/externalResolution because
- // there it's handled by the notifyWrite
- public void onResolution() {
- jObjectLRU.updateSize(this);
- }
-
- public void removeLocal(String name) {
- assertRwLock();
- try {
- Log.debug("Invalidating " + name);
- getMeta().setHaveLocalCopy(false);
- } catch (StatusRuntimeException sx) {
- if (sx.getStatus() != Status.NOT_FOUND)
- Log.info("Couldn't delete object from persistent store: ", sx);
- } catch (Exception e) {
- Log.info("Couldn't delete object from persistent store: ", e);
- }
- }
-
- @Override
- public void bumpVer() {
- assertRwLock();
- getMeta().bumpVersion(persistentPeerDataService.getSelfUuid());
- }
-
- @Override
- public void commitFence() {
- if (haveRwLock())
- throw new IllegalStateException("Waiting on object flush inside transaction?");
- if (getMeta().getLastModifiedTx() == -1) return;
- txWriteback.fence(getMeta().getLastModifiedTx());
- }
-
- @Override
- public void commitFenceAsync(VoidFn callback) {
- if (haveRwLock())
- throw new IllegalStateException("Waiting on object flush inside transaction?");
- if (getMeta().getLastModifiedTx() == -1) {
- callback.apply();
- return;
- }
- txWriteback.asyncFence(getMeta().getLastModifiedTx(), callback);
- }
-
- @Override
- public int estimateSize() {
- if (_dataPart.get() == null) return 1024; // Assume metadata etc takes up something
- else return _dataPart.get().estimateSize() + 1024;
- }
- }
-}
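The `_map` / `NamedWeakReference` / `_refQueue` trio in this class is a weak interning map: at most one live `JObjectImpl` per name, entries disappear once nothing else holds the object, and the cleanup thread drains the reference queue so dead map slots don't pile up. A self-contained sketch of the same mechanism (the `WeakInterner` name is hypothetical):

```java
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;

class WeakInterner<V> {
    // Weak reference that remembers its key so the cleaner can remove it.
    private static final class KeyedRef<V> extends WeakReference<V> {
        final String key;

        KeyedRef(String key, V value, ReferenceQueue<V> q) {
            super(value, q);
            this.key = key;
        }
    }

    private final ConcurrentHashMap<String, KeyedRef<V>> map = new ConcurrentHashMap<>();
    private final ReferenceQueue<V> refQueue = new ReferenceQueue<>();

    // Same loop shape as putImpl: retry until we either insert our candidate
    // or observe someone else's still-live entry.
    V intern(String key, V candidate) {
        while (true) {
            KeyedRef<V> ref = map.computeIfAbsent(key, k -> new KeyedRef<>(k, candidate, refQueue));
            V got = ref.get();
            if (got != null) return got;
            map.remove(key, ref); // referent died between lookups: clear, retry
        }
    }

    // Same job as the ref cleanup thread: drop slots for collected values.
    void drainQueueLoop() throws InterruptedException {
        while (true) {
            @SuppressWarnings("unchecked")
            KeyedRef<V> dead = (KeyedRef<V>) refQueue.remove(); // blocks
            map.remove(dead.key, dead);
        }
    }
}
```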
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java
deleted file mode 100644
index 5de25357..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectRefProcessor.java
+++ /dev/null
@@ -1,282 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
-import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
-import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor;
-import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
-import io.quarkus.logging.Log;
-import io.quarkus.runtime.ShutdownEvent;
-import io.quarkus.runtime.StartupEvent;
-import jakarta.annotation.Priority;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.enterprise.event.Observes;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.concurrent.BasicThreadFactory;
-import org.apache.commons.lang3.tuple.Pair;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-@ApplicationScoped
-public class JObjectRefProcessor {
- private final HashSetDelayedBlockingQueue<Pair<SoftJObject<?>, SoftJObject<?>>> _quickCandidates = new HashSetDelayedBlockingQueue<>(0);
- private final HashSetDelayedBlockingQueue<String> _candidates;
- private final HashSetDelayedBlockingQueue<String> _canDeleteRetries;
- private final HashSet<String> _movablesInProcessing = new HashSet<>();
- @Inject
- JObjectManager jObjectManager;
- @Inject
- PersistentPeerDataService persistentPeerDataService;
- @Inject
- RemoteObjectServiceClient remoteObjectServiceClient;
- @Inject
- AutoSyncProcessor autoSyncProcessor;
- @Inject
- JObjectTxManager jObjectTxManager;
- @ConfigProperty(name = "dhfs.objects.move-processor.threads")
- int moveProcessorThreads;
- @ConfigProperty(name = "dhfs.objects.ref-processor.threads")
- int refProcessorThreads;
- @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay")
- long canDeleteRetryDelay;
- @Inject
- ExecutorService executorService;
-
- private ExecutorService _movableProcessorExecutorService;
- private ExecutorService _refProcessorExecutorService;
-
- public JObjectRefProcessor(@ConfigProperty(name = "dhfs.objects.deletion.delay") long deletionDelay,
- @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") long canDeleteRetryDelay) {
- _candidates = new HashSetDelayedBlockingQueue<>(deletionDelay);
- _canDeleteRetries = new HashSetDelayedBlockingQueue<>(canDeleteRetryDelay);
- }
-
- void init(@Observes @Priority(200) StartupEvent event) throws IOException {
- BasicThreadFactory factory = new BasicThreadFactory.Builder()
- .namingPattern("move-proc-%d")
- .build();
- _movableProcessorExecutorService = Executors.newFixedThreadPool(moveProcessorThreads, factory);
-
- BasicThreadFactory factoryRef = new BasicThreadFactory.Builder()
- .namingPattern("ref-proc-%d")
- .build();
- _refProcessorExecutorService = Executors.newFixedThreadPool(refProcessorThreads, factoryRef);
- for (int i = 0; i < refProcessorThreads; i++) {
- _refProcessorExecutorService.submit(this::refProcessor);
- }
-
- // Continue GC from last shutdown
- //FIXME
-// executorService.submit(() ->
-// jObjectManager.findAll().forEach(n -> {
-// jObjectManager.get(n).ifPresent(o -> o.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
-// return null;
-// }));
-// }));
- }
-
- void shutdown(@Observes @Priority(800) ShutdownEvent event) throws InterruptedException {
- _refProcessorExecutorService.shutdownNow();
- if (!_refProcessorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
- Log.error("Refcounting threads didn't exit in 30 seconds");
- }
- }
-
- public void putQuickDeletionCandidate(SoftJObject<?> from, SoftJObject<?> obj) {
- _quickCandidates.add(Pair.of(from, obj));
- }
-
- public void putDeletionCandidate(String name) {
- synchronized (_movablesInProcessing) {
- if (_movablesInProcessing.contains(name)) return;
- if (_candidates.add(name))
- Log.debug("Deletion candidate: " + name);
- }
- }
-
- private void asyncProcessMovable(String objName) {
- synchronized (_movablesInProcessing) {
- if (_movablesInProcessing.contains(objName)) return;
- _movablesInProcessing.add(objName);
- }
-
- _movableProcessorExecutorService.submit(() -> {
- var obj = jObjectManager.get(objName).orElse(null);
- if (obj == null || obj.getMeta().isDeleted()) return;
- boolean delay = false;
- try {
- var knownHosts = persistentPeerDataService.getHostUuids();
- List<UUID> missing = new ArrayList<>();
-
- var ourReferrers = obj.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
- for (var x : knownHosts)
- if (!m.getConfirmedDeletes().contains(x)) missing.add(x);
- return m.getReferrers();
- });
- var ret = remoteObjectServiceClient.canDelete(missing, obj.getMeta().getName(), ourReferrers);
-
- long ok = 0;
-
- for (var r : ret) {
- if (!r.getDeletionCandidate())
- for (var rr : r.getReferrersList())
- autoSyncProcessor.add(rr);
- else
- ok++;
- }
-
- if (ok != missing.size()) {
- Log.debug("Delaying deletion check of " + obj.getMeta().getName());
- delay = true;
- }
-
- jObjectTxManager.executeTx(() -> {
- obj.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
- for (var r : ret)
- if (r.getDeletionCandidate())
- m.getConfirmedDeletes().add(UUID.fromString(r.getSelfUuid()));
- return null;
- });
- });
- } catch (Exception e) {
- Log.warn("When processing deletion of movable object " + obj.getMeta().getName(), e);
- } finally {
- synchronized (_movablesInProcessing) {
- _movablesInProcessing.remove(obj.getMeta().getName());
- if (!delay)
- _candidates.add(obj.getMeta().getName());
- else
- _canDeleteRetries.add(obj.getMeta().getName());
- }
- }
- });
- }
-
- private boolean processMovable(JObject<?> obj) {
- obj.assertRwLock();
- var knownHosts = persistentPeerDataService.getHostUuids();
- boolean missing = false;
- for (var x : knownHosts)
- if (!obj.getMeta().getConfirmedDeletes().contains(x)) {
- missing = true;
- break;
- }
-
- if (!missing) return true;
- asyncProcessMovable(obj.getMeta().getName());
- return false;
- }
-
- private void deleteRef(JObject<?> self, String name) {
- jObjectManager.get(name).ifPresent(ref -> ref.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, dc, bc, ic) -> {
- mc.removeRef(self.getMeta().getName());
- return null;
- }));
- }
-
- private void refProcessor() {
- try {
- while (!Thread.interrupted()) {
- String next = null;
- Pair<SoftJObject<?>, SoftJObject<?>> nextQuick = null;
-
- while (next == null && nextQuick == null) {
- nextQuick = _quickCandidates.tryGet();
-
- if (nextQuick != null) break;
-
- next = _canDeleteRetries.tryGet();
- if (next == null)
- next = _candidates.tryGet();
- if (next == null)
- nextQuick = _quickCandidates.get(canDeleteRetryDelay);
- }
-
- JObject<?> target;
-
- if (nextQuick != null) {
- var fromSoft = nextQuick.getLeft();
- var toSoft = nextQuick.getRight();
-
- var from = nextQuick.getLeft().get();
- var to = nextQuick.getRight().get();
-
- if (from != null && !from.getMeta().isDeleted()) {
- Log.warn("Quick delete failed for " + from.getMeta().getName() + " -> " + toSoft.getName());
- continue;
- }
-
- if (to == null) {
- Log.warn("Quick delete object missing: " + toSoft.getName());
- continue;
- }
-
- target = to;
-
- jObjectTxManager.executeTx(() -> {
- if (from != null)
- from.rwLock();
- try {
- try {
- to.rwLock();
- to.getMeta().removeRef(fromSoft.getName());
- } finally {
- to.rwUnlock();
- }
- } finally {
- if (from != null)
- from.rwUnlock();
- }
- });
- } else {
- target = jObjectManager.get(next).orElse(null);
- }
-
- if (target == null) continue;
- try {
- jObjectTxManager.executeTx(() -> {
- target.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, v, i) -> {
- if (m.isFrozen()) return null;
- if (m.isDeleted()) return null;
- if (!m.isDeletionCandidate()) return null;
- if (m.isSeen() && !m.isOnlyLocal()) {
- if (!processMovable(target))
- return null;
- }
- if (m.isSeen() && m.isOnlyLocal())
- Log.warn("Seen only-local object: " + m.getName());
-
-
- if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class))
- target.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
-
- Log.debug("Deleting " + m.getName());
-
- Collection<String> extracted = null;
- if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && target.getData() != null)
- extracted = target.getData().extractRefs();
- Collection<String> saved = target.getMeta().getSavedRefs();
-
- target.doDelete();
-
- if (saved != null)
- for (var r : saved) deleteRef(target, r);
- if (extracted != null)
- for (var r : extracted) deleteRef(target, r);
-
- return null;
- });
- });
- } catch (Exception ex) {
- Log.error("Error when deleting: " + next, ex);
- }
- }
- } catch (InterruptedException ignored) {
- }
- Log.info("JObject Refcounter thread exiting");
- }
-}
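The deletion pipeline above is driven by three `HashSetDelayedBlockingQueue` instances: `_candidates` delays deletion checks by `dhfs.objects.deletion.delay`, `_canDeleteRetries` re-queues objects whose remote delete confirmations came back incomplete, and `_quickCandidates` (delay 0) fast-paths dropping a reference when the referrer is already deleted. The queue class itself is not part of this diff; below is a minimal sketch of the semantics the processor relies on — delayed release plus add-time deduplication. All names are illustrative assumptions, not the real `com.usatiuk.dhfs.utils` implementation:

```java
import java.util.LinkedHashMap;

// Sketch of a delayed, deduplicating blocking queue: add() is a no-op for
// elements already queued, and tryGet()/get() only release an element once
// its delay has elapsed.
class DelayedDedupQueue<T> {
    private final long _delayMs;
    private final LinkedHashMap<T, Long> _entries = new LinkedHashMap<>(); // element -> enqueue time

    DelayedDedupQueue(long delayMs) {
        _delayMs = delayMs;
    }

    // Returns false (keeping the old deadline) if the element is already queued.
    synchronized boolean add(T el) {
        if (_entries.containsKey(el)) return false;
        _entries.put(el, System.currentTimeMillis());
        notifyAll();
        return true;
    }

    // Non-blocking: returns the oldest element whose delay has expired, or null.
    synchronized T tryGet() {
        var it = _entries.entrySet().iterator();
        if (!it.hasNext()) return null;
        var head = it.next();
        if (System.currentTimeMillis() - head.getValue() < _delayMs) return null;
        it.remove();
        return head.getKey();
    }

    // Blocking with timeout: waits up to timeoutMs for an expired element.
    synchronized T get(long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (true) {
            T ready = tryGet();
            if (ready != null) return ready;
            long left = deadline - System.currentTimeMillis();
            if (left <= 0) return null;
            // Coarse wakeup: either something was added or the head may have expired.
            wait(Math.min(left, Math.max(1, _delayMs)));
        }
    }
}
```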
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java
deleted file mode 100644
index 8bf62ef0..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectSnapshot.java
+++ /dev/null
@@ -1,10 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
-
-public record JObjectSnapshot
- (ObjectMetadataP meta,
- JObjectDataP data,
- int changelogHash) {
-}
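This snapshot record is what `JObjectTxManager` (next file) captures when an object is first write-locked in a transaction, and commit-time change detection runs against it: a changelog-hash mismatch marks the data as changed without reserializing, while metadata is compared by its serialized protobuf form. A hedged sketch of those two comparisons (the helper names are mine, not from the diff):

```java
// Illustrative helpers mirroring the dataDiff/_metaChanged checks in commit().
boolean dataChanged(JObjectSnapshot before, JObject<?> now) {
    // Cheap check: the changelog hash covers the object's version vector.
    return now.getMeta().changelogHash() != before.changelogHash();
}

boolean metaChanged(JObjectSnapshot before, ObjectMetadataP serializedMetaNow) {
    // Metadata is compared by serialized form, as commit() does.
    return !java.util.Objects.equals(before.meta(), serializedMetaNow);
}
```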
diff --git a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java b/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java
deleted file mode 100644
index 3634f3a2..00000000
--- a/dhfs-parent/server-old/src/main/java/com/usatiuk/dhfs/objects/jrepository/JObjectTxManager.java
+++ /dev/null
@@ -1,397 +0,0 @@
-package com.usatiuk.dhfs.objects.jrepository;
-
-import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
-import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
-import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
-import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
-import com.usatiuk.dhfs.utils.VoidFn;
-import io.quarkus.logging.Log;
-import jakarta.annotation.Nullable;
-import jakarta.enterprise.context.ApplicationScoped;
-import jakarta.inject.Inject;
-import org.apache.commons.lang3.concurrent.BasicThreadFactory;
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-
-import java.util.*;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Consumer;
-import java.util.function.Supplier;
-
-@ApplicationScoped
-public class JObjectTxManager {
- private final ThreadLocal<TxState> _state = new ThreadLocal<>();
- private final ExecutorService _serializerThreads;
- private final AtomicLong _transientTxId = new AtomicLong();
- @Inject
- ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
- @Inject
- ProtoSerializer<ObjectMetadataP, ObjectMetadata> metaProtoSerializer;
- @Inject
- JObjectLRU jObjectLRU;
- @Inject
- JObjectManager jObjectManager;
- @Inject
- InvalidationQueueService invalidationQueueService;
- @Inject
- TxWriteback txWriteback;
- @ConfigProperty(name = "dhfs.objects.ref_verification")
- boolean refVerification;
-
- public JObjectTxManager() {
- BasicThreadFactory factory = new BasicThreadFactory.Builder()
- .namingPattern("tx-serializer-%d")
- .build();
-
- _serializerThreads = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory);
- }
-
- public void begin() {
- if (_state.get() != null)
- throw new IllegalStateException("Transaction already running");
-
- _state.set(new TxState());
- }
-
- public void drop(JObject<?> obj) {
- var state = _state.get();
- if (state == null)
- throw new IllegalStateException("Transaction not running");
- Log.debug("Dropping " + obj.getMeta().getName() + " from " + state._id);
- obj.assertRwLock();
- state._writeObjects.remove(obj);
- obj.rwUnlock();
- }
-
- // Returns Id of bundle to wait for, or -1 if there was nothing written
- public long commit() {
- var state = _state.get();
- if (state == null)
- throw new IllegalStateException("Transaction not running");
-
- if (state._writeObjects.isEmpty()) {
-// Log.trace("Empty transaction " + state._id);
- state._callbacks.forEach(c -> c.accept(null));
- _state.remove();
- return -1;
- }
-
- Log.debug("Committing transaction " + state._id);
-
- for (var obj : state._writeObjects.entrySet()) {
- Log.debug("Committing " + obj.getKey().getMeta().getName() + " deleted=" + obj.getKey().getMeta().isDeleted() + " deletion-candidate=" + obj.getKey().getMeta().isDeletionCandidate());
-
- var dataDiff = obj.getKey().getMeta().changelogHash() != obj.getValue().snapshot.changelogHash()
- || obj.getValue()._forceInvalidated;
-
- if (refVerification) {
- // Null check in case of object not being resolved before (though then we can't check this)
- boolean dataDiffReal = obj.getValue().snapshot.data() != null
- && !Objects.equals(obj.getValue().snapshot.data(), obj.getKey().getData() == null ? null : dataProtoSerializer.serialize(obj.getKey().getData()));
-
- if (dataDiffReal && !dataDiff) {
- var msg = "Data diff not equal for " + obj.getKey().getMeta().getName() + " " + obj.getKey().getData() + " before = " + ((obj.getValue().snapshot != null) ? obj.getValue().snapshot.data() : null) + " after = " + ((obj.getKey().getData() != null) ? dataProtoSerializer.serialize(obj.getKey().getData()) : null);
- throw new IllegalStateException(msg);
- }
- if (dataDiff && !dataDiffReal)
- Log.warn("Useless update for " + obj.getKey().getMeta().getName());
- }
-
-// if (obj.getValue()._copy && !obj.getValue()._mutators.isEmpty())
-// throw new IllegalStateException("Object copied but had mutators!");
-
- if (refVerification && !obj.getValue()._copy) {
- var cur = dataProtoSerializer.serialize(obj.getKey().getData());
- for (var mut : obj.getValue()._mutators.reversed())
- revertMutator(obj.getKey(), mut);
- var rev = dataProtoSerializer.serialize(obj.getKey().getData());
-
- if (obj.getValue().snapshot.data() != null && !Objects.equals(rev, obj.getValue().snapshot.data()))
- throw new IllegalStateException("Mutator could not be reverted for object " + obj.getKey().getMeta().getName() + "\n old = " + obj.getValue().snapshot.data() + "\n reverted = " + rev + "\n");
-
- for (var mut : obj.getValue()._mutators)
- applyMutator(obj.getKey(), mut);
-
- var cur2 = dataProtoSerializer.serialize(obj.getKey().getData());
- if (!Objects.equals(cur, cur2))
- throw new IllegalStateException("Mutator could not be reapplied for object " + obj.getKey().getMeta().getName() + "\n old = " + cur + "\n reapplied = " + cur2 + "\n");
- }
-
- obj.getValue()._metaSerialized = metaProtoSerializer.serialize(obj.getKey().getMeta());
- obj.getValue()._metaChanged = !Objects.equals(obj.getValue().snapshot.meta(), obj.getValue()._metaSerialized);
- obj.getValue()._dataChanged = dataDiff;
-
- notifyWrite(obj.getKey(), obj.getValue()._metaChanged, dataDiff);
-
- if (refVerification) {
- var oldRefs = obj.getValue().snapshot.data() == null
- ? null
- : ((JObjectData) dataProtoSerializer.deserialize(obj.getValue().snapshot.data())).extractRefs();
- verifyRefs(obj.getKey(), oldRefs);
- }
- }
-
- var bundle = txWriteback.createBundle();
-
- try {
- for (var e : state._writeObjects.entrySet()) {
- var key = e.getKey();
- var value = e.getValue();
- if (key.getMeta().isDeleted()) {
- bundle.delete(key);
- continue;
- }
-
- if (!value._dataChanged && !value._metaChanged) {
- continue;
- }
-
- if (key.getMeta().isHaveLocalCopy() && value._dataChanged) {
- bundle.commit(key,
- value._metaSerialized,
- dataProtoSerializer.serialize(key.getData())
- );
- } else if (key.getMeta().isHaveLocalCopy() && !value._dataChanged) {
- bundle.commitMetaChange(key, value._metaSerialized);
- } else if (!key.getMeta().isHaveLocalCopy()) {
- bundle.commit(key, value._metaSerialized, null);
- } else {
- throw new IllegalStateException("Unexpected object flush combination");
- }
- }
- } catch (Exception ex) {
- Log.error("Error creating tx bundle ", ex);
- txWriteback.dropBundle(bundle);
- throw ex;
- }
-
- for (var e : state._writeObjects.entrySet())
- e.getKey().getMeta().setLastModifiedTx(bundle.getId());
-
- state._writeObjects.forEach((key, value) -> key.rwUnlock());
-
- state._callbacks.forEach(s -> txWriteback.asyncFence(bundle.getId(), () -> s.accept(null)));
-
- txWriteback.commitBundle(bundle);
-
- _state.remove();
-
- return bundle.getId();
- }
-
- private void notifyWrite(JObject<?> obj, boolean metaChanged, boolean hasDataChanged) {
- jObjectLRU.updateSize(obj);
- jObjectManager.runWriteListeners(obj, metaChanged, hasDataChanged);
- if (hasDataChanged && obj.getMeta().isHaveLocalCopy()) {
- invalidationQueueService.pushInvalidationToAll(obj);
- }
- }
-
- private void verifyRefs(JObject<?> obj, @Nullable Collection<String> oldRefs) {
- if (!refVerification) return;
-
- if (obj.getData() == null) return;
- if (obj.getMeta().isDeleted()) return;
- var newRefs = obj.getData().extractRefs();
- if (oldRefs != null)
- for (var o : oldRefs)
- if (!newRefs.contains(o)) {
- jObjectManager.get(o).ifPresent(refObj -> {
- if (refObj.getMeta().checkRef(obj.getMeta().getName()))
- throw new IllegalStateException("Object " + o + " is referenced from " + obj.getMeta().getName() + " but shouldn't be");
- });
- }
- for (var r : newRefs) {
- var refObj = jObjectManager.get(r).orElseThrow(() -> new IllegalStateException("Object " + r + " not found but should be referenced from " + obj.getMeta().getName()));
- if (refObj.getMeta().isDeleted())
- throw new IllegalStateException("Object " + r + " deleted but referenced from " + obj.getMeta().getName());
- if (!refObj.getMeta().checkRef(obj.getMeta().getName()))
- throw new IllegalStateException("Object " + r + " is not referenced by " + obj.getMeta().getName() + " but should be");
- }
- }
-
- private <T extends JObjectData> void applyMutator(JObject<?> obj, JMutator<T> mutator) {
- mutator.mutate((T) obj.getData());
- }
-
- private <T extends JObjectData> void revertMutator(JObject<?> obj, JMutator<T> mutator) {
- mutator.revert((T) obj.getData());
- }
-
- public void rollback(String message) {
- var state = _state.get();
- if (state == null)
- throw new IllegalStateException("Transaction not running");
- Log.debug("Rollback of " + state._id);
-
- for (var obj : state._writeObjects.entrySet()) {
- Log.debug("Rollback of " + obj.getKey().getMeta().getName());
- try {
- if (obj.getValue()._copy) {
- obj.getKey().rollback(
- metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()),
- obj.getValue().snapshot.data() != null ? dataProtoSerializer.deserialize(obj.getValue().snapshot.data()) : null);
- } else {
- for (var mut : obj.getValue()._mutators.reversed())
- revertMutator(obj.getKey(), mut);
- obj.getKey().rollback(metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()), obj.getKey().getData());
- }
- obj.getKey().updateDeletionState();
- } finally {
- obj.getKey().rwUnlock();
- }
- }
-
- state._callbacks.forEach(c -> c.accept(message != null ? message : "Unknown error"));
- Log.debug("Rollback of " + state._id + " done");
- _state.remove();
- }
-
- public void executeTxAndFlushAsync(VoidFn fn, Consumer<String> callback) {
- var state = _state.get();
- if (state != null) {
- _state.get()._callbacks.add(callback);
- fn.apply();
- return;
- }
-
- begin();
- try {
- _state.get()._callbacks.add(callback);
- fn.apply();
- commit();
- } catch (Exception e) {
- Log.debug("Error in transaction " + _state.get()._id, e);
- rollback(e.getMessage());
- throw e;
- }
- }
-
- public void executeTxAndFlush(VoidFn fn) {
- executeTxAndFlush(() -> {
- fn.apply();
- return null;
- });
- }
-
- public