Mirror of https://github.com/usatiuk/dhfs.git (synced 2025-10-28 20:47:49 +01:00)
Remove server-old
@@ -1,5 +0,0 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*
dhfs-parent/server-old/.gitignore (vendored, 43 deletions)
@@ -1,43 +0,0 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml

# Eclipse
.project
.classpath
.settings/
bin/

# IntelliJ
.idea
*.ipr
*.iml
*.iws

# NetBeans
nb-configuration.xml

# Visual Studio Code
.vscode
.factorypath

# OSX
.DS_Store

# Vim
*.swp
*.swo

# patch
*.orig
*.rej

# Local environment
.env

# Plugin directory
/.quarkus/cli/plugins/
@@ -1,2 +0,0 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl
@@ -1,42 +0,0 @@
version: "3.2"

services:
  dhfs1:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs1:/dhfs_root
      - $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
      -jar /app/quarkus-run.jar"
    ports:
      - 8080:8080
      - 8081:8443
      - 5005:5005
  dhfs2:
    build: .
    privileged: true
    devices:
      - /dev/fuse
    volumes:
      - $HOME/dhfs/dhfs2:/dhfs_root
      - $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
      - ./target/quarkus-app:/app
    command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
      --add-exports java.base/jdk.internal.access=ALL-UNNAMED
      -Ddhfs.objects.persistence.files.root=/dhfs_root/p
      -Ddhfs.objects.root=/dhfs_root/d
      -Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
      -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
      -jar /app/quarkus-run.jar"
    ports:
      - 8090:8080
      - 8091:8443
      - 5010:5010
@@ -1,209 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.usatiuk.dhfs</groupId>
    <artifactId>server</artifactId>
    <version>1.0.0-SNAPSHOT</version>

    <parent>
        <groupId>com.usatiuk.dhfs</groupId>
        <artifactId>parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.awaitility</groupId>
            <artifactId>awaitility</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>autoprotomap-deployment</artifactId>
            <version>1.0-SNAPSHOT</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk18on</artifactId>
            <version>1.78.1</version>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcpkix-jdk18on</artifactId>
            <version>1.78.1</version>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-security</artifactId>
        </dependency>
        <dependency>
            <groupId>net.openhft</groupId>
            <artifactId>zero-allocation-hashing</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-grpc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-arc</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-client-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-rest-jsonb</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-scheduler</artifactId>
        </dependency>
        <dependency>
            <groupId>io.quarkus</groupId>
            <artifactId>quarkus-junit5</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.github.SerCeMan</groupId>
            <artifactId>jnr-fuse</artifactId>
            <version>44ed40f8ce</version>
        </dependency>
        <dependency>
            <groupId>com.github.jnr</groupId>
            <artifactId>jnr-ffi</artifactId>
            <version>2.2.16</version>
        </dependency>
        <dependency>
            <groupId>com.github.jnr</groupId>
            <artifactId>jnr-posix</artifactId>
            <version>3.1.19</version>
        </dependency>
        <dependency>
            <groupId>com.github.jnr</groupId>
            <artifactId>jnr-constants</artifactId>
            <version>0.10.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
        </dependency>
        <dependency>
            <groupId>org.jboss.slf4j</groupId>
            <artifactId>slf4j-jboss-logmanager</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-math3</artifactId>
            <version>3.6.1</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk</groupId>
            <artifactId>kleppmanntree</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>supportlib</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>objects</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>com.usatiuk.dhfs</groupId>
            <artifactId>utils</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <forkCount>1C</forkCount>
                    <reuseForks>false</reuseForks>
                    <parallel>classes</parallel>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <configuration>
                    <systemPropertyVariables>
                        <junit.jupiter.execution.parallel.enabled>
                            true
                        </junit.jupiter.execution.parallel.enabled>
                        <junit.jupiter.execution.parallel.mode.default>
                            concurrent
                        </junit.jupiter.execution.parallel.mode.default>
                        <junit.jupiter.execution.parallel.config.dynamic.factor>
                            0.5
                        </junit.jupiter.execution.parallel.config.dynamic.factor>
                        <junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
                        <junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
                    </systemPropertyVariables>
                </configuration>
            </plugin>
            <plugin>
                <groupId>${quarkus.platform.group-id}</groupId>
                <artifactId>quarkus-maven-plugin</artifactId>
                <version>${quarkus.platform.version}</version>
                <extensions>true</extensions>
                <executions>
                    <execution>
                        <id>quarkus-plugin</id>
                        <goals>
                            <goal>build</goal>
                            <goal>generate-code</goal>
                            <goal>generate-code-tests</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
@@ -1 +0,0 @@
lombok.accessors.prefix += _
@@ -1,97 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a container's restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container's available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` will never
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,93 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port in your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
#   in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
#   used to calculate a default maximal heap memory based on a container's restriction.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
#   of the container's available memory as set here. The default is `50` which means 50%
#   of the available memory is used as an upper boundary. You can skip this mechanism by
#   setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
#   is used to calculate a default initial heap memory based on the maximum heap memory.
#   If used in a container without any memory constraints for the container then this
#   option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
#   of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
#   is used as the initial heap size. You can skip this mechanism by setting this value
#   to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
#   This is used to calculate the maximum value of the initial heap memory. If used in
#   a container without any memory constraints for the container then this option has
#   no effect. If there is a memory constraint then `-Xms` is limited to the value set
#   here. The default is 4096MB which means the calculated value of `-Xms` will never
#   be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
#   when things are happening. This option, if set to true, will set
#   `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
#   https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
#   (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
#   (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
#   (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
#   previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
#   contain the necessary JRE command-line options to specify the required GC, which
#   will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
#   accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18

ENV LANGUAGE='en_US:en'


COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar

EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"

ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]
@@ -1,27 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,30 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
    && chmod "g+rwX" /work \
    && chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application

EXPOSE 8080
USER 1001

ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
@@ -1,63 +0,0 @@
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.Arrays;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@ApplicationScoped
public class DeadlockDetector {
    private final ExecutorService _executor = Executors.newSingleThreadExecutor();

    void init(@Observes @Priority(1) StartupEvent event) {
        _executor.submit(this::run);
    }

    void shutdown(@Observes @Priority(100000) ShutdownEvent event) {
        _executor.shutdownNow();
    }

    private void run() {
        ThreadMXBean bean = ManagementFactory.getThreadMXBean();
        try {
            while (!Thread.interrupted()) {
                Thread.sleep(4000);

                long[] threadIds = bean.findDeadlockedThreads(); // Returns null if no threads are deadlocked.

                if (threadIds != null) {
                    ThreadInfo[] infos = bean.getThreadInfo(threadIds, Integer.MAX_VALUE);

                    StringBuilder sb = new StringBuilder();

                    sb.append("Deadlock detected!\n");

                    for (ThreadInfo info : infos) {
                        StackTraceElement[] stack = info.getStackTrace();
                        sb.append(info.getThreadName()).append("\n");
                        sb.append("getLockedMonitors: ").append(Arrays.toString(info.getLockedMonitors())).append("\n");
                        sb.append("getLockedSynchronizers: ").append(Arrays.toString(info.getLockedSynchronizers())).append("\n");
                        sb.append("waiting on: ").append(info.getLockInfo()).append("\n");
                        sb.append("locked by: ").append(info.getLockOwnerName()).append("\n");
                        sb.append("Stack trace:\n");
                        for (var e : stack) {
                            sb.append(e.toString()).append("\n");
                        }
                        sb.append("===");
                    }

                    Log.error(sb);
                }
            }
        } catch (InterruptedException e) {
            // Expected when the executor is shut down; fall through and exit.
        }
        Log.info("Deadlock detector thread exiting");
    }
}
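The detector above is plain ThreadMXBean polling. A minimal, self-contained demo (not part of the deleted code; class and lock names are illustrative) that produces exactly the condition findDeadlockedThreads() reports:

import java.lang.management.ManagementFactory;

public class DeadlockDemo {
    public static void main(String[] args) throws InterruptedException {
        Object a = new Object(), b = new Object();
        // Two threads taking the same two monitors in opposite order.
        new Thread(() -> { synchronized (a) { pause(); synchronized (b) { } } }).start();
        new Thread(() -> { synchronized (b) { pause(); synchronized (a) { } } }).start();
        Thread.sleep(500);
        // The same call DeadlockDetector polls every 4000 ms:
        long[] ids = ManagementFactory.getThreadMXBean().findDeadlockedThreads();
        System.out.println(ids == null ? "no deadlock" : ids.length + " deadlocked threads");
        System.exit(0); // the deadlocked threads would otherwise keep the JVM alive
    }

    private static void pause() {
        try { Thread.sleep(100); } catch (InterruptedException ignored) { }
    }
}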
@@ -1,21 +0,0 @@
package com.usatiuk.dhfs;

import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
import io.quarkus.runtime.annotations.QuarkusMain;

@QuarkusMain
public class Main {
    public static void main(String... args) {
        Quarkus.run(DhfsStorageServerApp.class, args);
    }

    public static class DhfsStorageServerApp implements QuarkusApplication {

        @Override
        public int run(String... args) throws Exception {
            Quarkus.waitForExit();
            return 0;
        }
    }
}
@@ -1,42 +0,0 @@
package com.usatiuk.dhfs;

import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.IOException;
import java.nio.file.Paths;

@ApplicationScoped
public class ShutdownChecker {
    private static final String dataFileName = "running";
    @ConfigProperty(name = "dhfs.objects.root")
    String dataRoot;
    boolean _cleanShutdown = true;
    boolean _initialized = false;

    void init(@Observes @Priority(2) StartupEvent event) throws IOException {
        Paths.get(dataRoot).toFile().mkdirs();
        Log.info("Initializing with root " + dataRoot);
        if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
            _cleanShutdown = false;
            Log.error("Unclean shutdown detected!");
        } else {
            Paths.get(dataRoot).resolve(dataFileName).toFile().createNewFile();
        }
        _initialized = true;
    }

    void shutdown(@Observes @Priority(100000) ShutdownEvent event) throws IOException {
        Paths.get(dataRoot).resolve(dataFileName).toFile().delete();
    }

    public boolean lastShutdownClean() {
        if (!_initialized) throw new IllegalStateException("ShutdownChecker not initialized");
        return _cleanShutdown;
    }
}
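The class above reduces to a marker-file pattern: create a "running" file at startup, delete it on orderly shutdown, and treat its presence at the next startup as a crash. A standalone sketch of just that pattern (the /tmp path is illustrative, not from the deleted code):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class CleanShutdownSketch {
    public static void main(String[] args) throws IOException {
        Path marker = Path.of("/tmp/dhfs-demo/running"); // illustrative location
        Files.createDirectories(marker.getParent());
        boolean lastShutdownClean = !Files.exists(marker); // leftover marker => crash
        if (!lastShutdownClean) Files.delete(marker);
        Files.createFile(marker); // "we are running" flag
        System.out.println("last shutdown clean: " + lastShutdownClean);
        Runtime.getRuntime().addShutdownHook(
                new Thread(() -> marker.toFile().delete())); // orderly shutdown removes it
    }
}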
@@ -1,107 +0,0 @@
package com.usatiuk.dhfs.files.conflicts;

import com.usatiuk.dhfs.files.objects.Directory;
import com.usatiuk.dhfs.files.objects.FsNode;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.dhfs.objects.repository.ObjectHeader;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.NotImplementedException;

import java.util.*;

@ApplicationScoped
public class DirectoryConflictResolver implements ConflictResolver {
    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Inject
    JObjectManager jObjectManager;

    @Override
    public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
        var theirsDir = (Directory) theirsData;
        if (!theirsDir.getClass().equals(Directory.class)) {
            Log.error("Object type mismatch!");
            throw new NotImplementedException();
        }

        ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursDirU, bump, invalidate) -> {
            if (oursDirU == null)
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
            if (!(oursDirU instanceof Directory oursDir))
                throw new NotImplementedException("Type conflict for " + ours.getMeta().getName() + ", directory was expected");

            Directory first;
            Directory second;
            UUID otherHostname;

            if (oursDir.getMtime() >= theirsDir.getMtime()) {
                first = oursDir;
                second = theirsDir;
                otherHostname = conflictHost;
            } else {
                second = oursDir;
                first = theirsDir;
                otherHostname = persistentPeerDataService.getSelfUuid();
            }

            LinkedHashMap<String, UUID> mergedChildren = new LinkedHashMap<>(first.getChildren());
            Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());

            for (var entry : second.getChildren().entrySet()) {
                if (mergedChildren.containsKey(entry.getKey()) &&
                        !Objects.equals(mergedChildren.get(entry.getKey()), entry.getValue())) {
                    int i = 0;
                    do {
                        String name = entry.getKey() + ".conflict." + i + "." + otherHostname;
                        if (mergedChildren.containsKey(name)) {
                            i++;
                            continue;
                        }
                        mergedChildren.put(name, entry.getValue());
                        break;
                    } while (true);
                } else {
                    mergedChildren.put(entry.getKey(), entry.getValue());
                }
            }

            for (var entry : theirsHeader.getChangelog().getEntriesList()) {
                newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
            }

            boolean wasChanged = oursDir.getChildren().size() != mergedChildren.size()
                    || oursDir.getMtime() != first.getMtime()
                    || oursDir.getCtime() != first.getCtime();

            if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));

            if (wasChanged) {
                newChangelog.merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum);

                for (var child : mergedChildren.values()) {
                    if (!(new HashSet<>(oursDir.getChildren().values()).contains(child))) {
                        jObjectManager.getOrPut(child.toString(), FsNode.class, Optional.of(oursDir.getName()));
                    }
                }

                oursDir.setMtime(first.getMtime());
                oursDir.setCtime(first.getCtime());

                oursDir.setChildren(mergedChildren);
            }

            m.setChangelog(newChangelog);
            return null;
        });
    }
}
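The child-merge policy above, in isolation: entries from the newer directory win, and a same-name-but-different-target entry from the other side is preserved under "<name>.conflict.<i>.<host>" with the first free i. A standalone sketch (class and method names are illustrative, not from the deleted code):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;

public class DirectoryMergeSketch {
    static Map<String, UUID> mergeChildren(Map<String, UUID> first, Map<String, UUID> second, UUID otherHost) {
        LinkedHashMap<String, UUID> merged = new LinkedHashMap<>(first); // newer side wins outright
        for (var entry : second.entrySet()) {
            if (merged.containsKey(entry.getKey())
                    && !Objects.equals(merged.get(entry.getKey()), entry.getValue())) {
                int i = 0;
                String name;
                do {
                    name = entry.getKey() + ".conflict." + i++ + "." + otherHost;
                } while (merged.containsKey(name)); // first free conflict name
                merged.put(name, entry.getValue()); // keep the losing entry under a new name
            } else {
                merged.put(entry.getKey(), entry.getValue());
            }
        }
        return merged;
    }
}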
@@ -1,182 +0,0 @@
package com.usatiuk.dhfs.files.conflicts;

import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.dhfs.objects.repository.ObjectHeader;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.*;

@ApplicationScoped
public class FileConflictResolver implements ConflictResolver {
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    DhfsFileService fileService;
    @Inject
    JKleppmannTreeManager jKleppmannTreeManager;
    @Inject
    JObjectManager jObjectManager;

    @ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
    boolean useHashForChunks;

    // FIXME: There might be a race where node with conflict deletes a file, and we answer that
    // it can do it as we haven't recorded the received file in the object model yet
    @Override
    public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
        var theirsFile = (File) theirsData;
        if (!theirsFile.getClass().equals(File.class)) {
            Log.error("Object type mismatch!");
            throw new NotImplementedException();
        }

        var newJFile = ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursFileU, bumpFile, invalidateFile) -> {
            if (oursFileU == null)
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
            if (!(oursFileU instanceof File oursFile))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Bad type for file"));

            File first;
            File second;
            UUID otherHostname;

            if (oursFile.getMtime() >= theirsFile.getMtime()) {
                first = oursFile;
                second = theirsFile;
                otherHostname = conflictHost;
            } else {
                second = oursFile;
                first = theirsFile;
                otherHostname = persistentPeerDataService.getSelfUuid();
            }

            Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());

            for (var entry : theirsHeader.getChangelog().getEntriesList()) {
                newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
            }

            boolean chunksDiff = !Objects.equals(first.getChunks(), second.getChunks());

            var firstChunksCopy = first.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
            var secondChunksCopy = second.getChunks().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();

            boolean wasChanged = oursFile.getMtime() != first.getMtime()
                    || oursFile.getCtime() != first.getCtime()
                    || first.isSymlink() != second.isSymlink()
                    || chunksDiff;

            if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));

            m.setChangelog(newChangelog);

            if (wasChanged) {
                m.getChangelog().merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum);

                if (useHashForChunks)
                    throw new NotImplementedException();

                HashSet<String> oursBackup = new HashSet<>(oursFile.getChunks().values());
                oursFile.getChunks().clear();

                for (var e : firstChunksCopy) {
                    oursFile.getChunks().put(e.getLeft(), e.getValue());
                    jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(oursFile.getName()));
                }
                HashSet<String> oursNew = new HashSet<>(oursFile.getChunks().values());

                oursFile.setMtime(first.getMtime());
                oursFile.setCtime(first.getCtime());

                var newFile = new File(UUID.randomUUID(), second.getMode(), second.isSymlink());

                newFile.setMtime(second.getMtime());
                newFile.setCtime(second.getCtime());

                for (var e : secondChunksCopy) {
                    newFile.getChunks().put(e.getLeft(), e.getValue());
                    jObjectManager.getOrPut(e.getValue(), ChunkData.class, Optional.of(newFile.getName()));
                }

                fileService.updateFileSize((JObject<File>) ours);

                var ret = jObjectManager.putLocked(newFile, Optional.empty());

                fileService.updateFileSize((JObject<File>) ret);

                try {
                    for (var cuuid : oursBackup) {
                        if (!oursNew.contains(cuuid))
                            jObjectManager
                                    .get(cuuid)
                                    .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, d, b, v) -> {
                                        mc.removeRef(oursFile.getName());
                                        return null;
                                    }));
                    }
                } catch (Exception e) {
                    ret.getMeta().unfreeze();
                    ret.rwUnlock();
                    return null;
                }
                return ret;
            }

            return null;
        });

        if (newJFile == null) return;
        boolean locked = true;

        // FIXME: Slow and what happens if a directory is deleted?
        try {
            var parent = fileService.inoToParent(ours.getMeta().getName());
            // FIXME?
            var tree = jKleppmannTreeManager.getTree("fs");

            var nodeId = tree.getNewNodeId();
            newJFile.getMeta().addRef(nodeId);
            newJFile.getMeta().unfreeze();
            newJFile.rwUnlock();
            locked = false;

            int i = 0;

            do {
                try {
                    tree.move(parent.getRight(), new JKleppmannTreeNodeMetaFile(parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + conflictHost + "." + i, newJFile.getMeta().getName()), nodeId);
                } catch (AlreadyExistsException aex) {
                    i++;
                    continue;
                }
                break;
            } while (true);
        } catch (Exception e) {
            Log.error("Error when creating new file for " + ours.getMeta().getName(), e);
        } finally {
            if (locked) {
                newJFile.getMeta().unfreeze();
                newJFile.getMeta().getReferrersMutable().clear();
                newJFile.rwUnlock();
            }
        }
    }
}
@@ -1,45 +0,0 @@
package com.usatiuk.dhfs.files.conflicts;

import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.dhfs.objects.repository.ObjectHeader;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import jakarta.enterprise.context.ApplicationScoped;

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;

@ApplicationScoped
public class NoOpConflictResolver implements ConflictResolver {
    @Override
    public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
        ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, i) -> {
            if (d == null)
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));

            if (!Objects.equals(theirsData.getClass(), ours.getData().getClass()))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Type conflict for object " + m.getName()
                        + " ours: " + ours.getData().getClass() + " theirs: " + theirsData.getClass()));

            if (!Objects.equals(theirsData, ours.getData()))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict for immutable object " + m.getName()));

            Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());

            for (var entry : theirsHeader.getChangelog().getEntriesList())
                newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);

            if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));

            m.setChangelog(newChangelog);

            return null;
        });
    }
}
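All three resolvers share the same changelog step: per-host version counters are merged entry-wise with Long::max, the sum of the merged counters is compared against the locally known best version, and resolution aborts if the local copy is already newer (a concurrent update raced the merge). The step in isolation (class and variable names are illustrative, not from the deleted code):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

public class ChangelogMergeSketch {
    static Map<UUID, Long> merge(Map<UUID, Long> ours, Map<UUID, Long> theirs) {
        Map<UUID, Long> merged = new LinkedHashMap<>(ours);
        theirs.forEach((host, version) -> merged.merge(host, version, Long::max)); // entry-wise max
        return merged;
    }

    public static void main(String[] args) {
        UUID h1 = UUID.randomUUID(), h2 = UUID.randomUUID();
        Map<UUID, Long> ours = Map.of(h1, 3L, h2, 1L);
        Map<UUID, Long> theirs = Map.of(h1, 2L, h2, 4L);
        Map<UUID, Long> merged = merge(ours, theirs);                 // {h1=3, h2=4}
        long total = merged.values().stream().reduce(0L, Long::sum);  // 7
        // The resolvers throw Status.ABORTED when the locally known best
        // version exceeds this sum, i.e. the merge lost a race with a newer update.
        System.out.println(merged + " total=" + total);
    }
}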
@@ -1,90 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.files.conflicts.NoOpConflictResolver;
import com.usatiuk.dhfs.objects.jrepository.AssumedUnique;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.Leaf;
import com.usatiuk.dhfs.objects.persistence.ChunkDataP;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import net.openhft.hashing.LongTupleHashFunction;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

@AssumedUnique
@Leaf
public class ChunkData extends JObjectData {
    final ChunkDataP _data;

    public ChunkData(ByteString bytes) {
        super();
        _data = ChunkDataP.newBuilder()
                .setData(bytes)
                // TODO: There might be (most definitely) a copy there
                .setName(Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes.asReadOnlyByteBuffer()))
                        .mapToObj(Long::toHexString).collect(Collectors.joining()))
                .build();
    }

    public ChunkData(ByteString bytes, String name) {
        super();
        _data = ChunkDataP.newBuilder()
                .setData(bytes)
                .setName(name)
                .build();
    }

    public ChunkData(ChunkDataP chunkDataP) {
        super();
        _data = chunkDataP;
    }

    ChunkDataP getData() {
        return _data;
    }

    public ByteString getBytes() {
        return _data.getData();
    }

    public int getSize() {
        return _data.getData().size();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ChunkData chunkData = (ChunkData) o;
        return Objects.equals(getName(), chunkData.getName());
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(getName());
    }

    @Override
    public String getName() {
        return _data.getName();
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return NoOpConflictResolver.class;
    }

    @Override
    public Collection<String> extractRefs() {
        return List.of();
    }

    @Override
    public int estimateSize() {
        return _data.getData().size();
    }
}
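With hashed names enabled, a chunk's identity is the xx128 hash of its bytes rendered as hex, which is what makes ChunkData content-addressed and safe to pair with NoOpConflictResolver: equal names imply equal bytes. A standalone sketch of that naming (assumptions: plain byte[] input instead of a ByteString; xx128 from net.openhft:zero-allocation-hashing, as in the deleted code):

import net.openhft.hashing.LongTupleHashFunction;

import java.util.Arrays;
import java.util.stream.Collectors;

public class ChunkNameSketch {
    static String contentName(byte[] bytes) {
        // Hash the chunk body with xx128 and hex-join the two 64-bit halves,
        // mirroring the name ChunkData derives above.
        return Arrays.stream(LongTupleHashFunction.xx128().hashBytes(bytes))
                .mapToObj(Long::toHexString)
                .collect(Collectors.joining());
    }

    public static void main(String[] args) {
        System.out.println(contentName("hello".getBytes())); // same bytes => same name
    }
}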
@@ -1,18 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.ChunkDataP;
import jakarta.inject.Singleton;

@Singleton
public class ChunkDataSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
    @Override
    public ChunkData deserialize(ChunkDataP message) {
        return new ChunkData(message);
    }

    @Override
    public ChunkDataP serialize(ChunkData object) {
        return object.getData();
    }
}
@@ -1,65 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.files.conflicts.DirectoryConflictResolver;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import lombok.Getter;
import lombok.Setter;

import java.io.Serial;
import java.util.*;

public class Directory extends FsNode {
    @Serial
    private static final long serialVersionUID = 1;
    @Getter
    @Setter
    private Map<String, UUID> _children = new HashMap<>();

    public Directory(UUID uuid) {
        super(uuid);
    }

    public Directory(UUID uuid, long mode) {
        super(uuid, mode);
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return DirectoryConflictResolver.class;
    }

    public Optional<UUID> getKid(String name) {
        return Optional.ofNullable(_children.get(name));
    }

    public boolean removeKid(String name) {
        return _children.remove(name) != null;
    }

    public boolean putKid(String name, UUID uuid) {
        if (_children.containsKey(name)) return false;

        _children.put(name, uuid);
        return true;
    }

    @Override
    public Class<? extends JObjectData> getRefType() {
        return FsNode.class;
    }

    @Override
    public Collection<String> extractRefs() {
        return _children.values().stream().map(UUID::toString).toList();
    }

    public List<String> getChildrenList() {
        return _children.keySet().stream().toList();
    }

    @Override
    public int estimateSize() {
        return _children.size() * 192;
    }
}
@@ -1,36 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.DirectoryP;
import com.usatiuk.dhfs.objects.persistence.FsNodeP;
import jakarta.inject.Singleton;

import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

@Singleton
public class DirectorySerializer implements ProtoSerializer<DirectoryP, Directory> {
    @Override
    public Directory deserialize(DirectoryP message) {
        var ret = new Directory(UUID.fromString(message.getFsNode().getUuid()));
        ret.setMtime(message.getFsNode().getMtime());
        ret.setCtime(message.getFsNode().getCtime());
        ret.setMode(message.getFsNode().getMode());
        ret.getChildren().putAll(message.getChildrenMap().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> UUID.fromString(e.getValue()))));
        return ret;
    }

    @Override
    public DirectoryP serialize(Directory object) {
        return DirectoryP.newBuilder()
                .setFsNode(FsNodeP.newBuilder()
                        .setCtime(object.getCtime())
                        .setMtime(object.getMtime())
                        .setMode(object.getMode())
                        .setUuid(object.getUuid().toString())
                        .build())
                .putAllChildren(object.getChildren().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString())))
                .build();
    }
}
@@ -1,51 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.files.conflicts.FileConflictResolver;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import lombok.Getter;
import lombok.Setter;

import java.util.*;

public class File extends FsNode {
    @Getter
    private final NavigableMap<Long, String> _chunks;
    @Getter
    private final boolean _symlink;
    @Getter
    @Setter
    private long _size = 0;

    public File(UUID uuid, long mode, boolean symlink) {
        super(uuid, mode);
        _symlink = symlink;
        _chunks = new TreeMap<>();
    }

    public File(UUID uuid, long mode, boolean symlink, NavigableMap<Long, String> chunks) {
        super(uuid, mode);
        _symlink = symlink;
        _chunks = chunks;
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return FileConflictResolver.class;
    }

    @Override
    public Class<? extends JObjectData> getRefType() {
        return ChunkData.class;
    }

    @Override
    public Collection<String> extractRefs() {
        return Collections.unmodifiableCollection(_chunks.values());
    }

    @Override
    public int estimateSize() {
        return _chunks.size() * 192;
    }
}
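A file body here is a NavigableMap from chunk start offset to chunk id, so finding the chunk that covers a given byte offset is a floorEntry lookup. A standalone sketch with illustrative ids (not from the deleted code):

import java.util.NavigableMap;
import java.util.TreeMap;

public class ChunkLookupSketch {
    public static void main(String[] args) {
        NavigableMap<Long, String> chunks = new TreeMap<>();
        chunks.put(0L, "chunk-a");    // bytes [0, 4096)
        chunks.put(4096L, "chunk-b"); // bytes [4096, ...)
        long offset = 5000;
        var covering = chunks.floorEntry(offset); // greatest start <= offset
        System.out.println(covering.getValue() + " holds byte " + offset); // chunk-b
    }
}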
@@ -1,46 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.FileP;
import com.usatiuk.dhfs.objects.persistence.FsNodeP;
import jakarta.inject.Singleton;

import java.util.TreeMap;
import java.util.UUID;

@Singleton
public class FileSerializer implements ProtoSerializer<FileP, File> {
    @Override
    public File deserialize(FileP message) {
        TreeMap<Long, String> chunks = new TreeMap<>();
        message.getChunksList().forEach(chunk -> {
            chunks.put(chunk.getStart(), chunk.getId());
        });
        var ret = new File(UUID.fromString(message.getFsNode().getUuid()),
                message.getFsNode().getMode(),
                message.getSymlink(),
                chunks
        );
        ret.setMtime(message.getFsNode().getMtime());
        ret.setCtime(message.getFsNode().getCtime());
        ret.setSize(message.getSize());
        return ret;
    }

    @Override
    public FileP serialize(File object) {
        var builder = FileP.newBuilder()
                .setFsNode(FsNodeP.newBuilder()
                        .setCtime(object.getCtime())
                        .setMtime(object.getMtime())
                        .setMode(object.getMode())
                        .setUuid(object.getUuid().toString())
                        .build())
                .setSymlink(object.isSymlink())
                .setSize(object.getSize());
        object.getChunks().forEach((s, i) -> {
            builder.addChunksBuilder().setStart(s).setId(i);
        });
        return builder.build();
    }
}
@@ -1,43 +0,0 @@
package com.usatiuk.dhfs.files.objects;

import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import lombok.Getter;
import lombok.Setter;

import java.io.Serial;
import java.util.UUID;

public abstract class FsNode extends JObjectData {
    @Serial
    private static final long serialVersionUID = 1;

    @Getter
    final UUID _uuid;
    @Getter
    @Setter
    private long _mode;
    @Getter
    @Setter
    private long _ctime;
    @Getter
    @Setter
    private long _mtime;

    protected FsNode(UUID uuid) {
        this._uuid = uuid;
        this._ctime = System.currentTimeMillis();
        this._mtime = this._ctime;
    }

    protected FsNode(UUID uuid, long mode) {
        this._uuid = uuid;
        this._mode = mode;
        this._ctime = System.currentTimeMillis();
        this._mtime = this._ctime;
    }

    @Override
    public String getName() {
        return _uuid.toString();
    }
}
@@ -1,51 +0,0 @@
package com.usatiuk.dhfs.files.service;

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.Optional;

public interface DhfsFileService {
    Optional<String> open(String name);

    Optional<String> create(String name, long mode);

    Pair<String, String> inoToParent(String ino);

    void mkdir(String name, long mode);

    Optional<GetattrRes> getattr(String name);

    Boolean chmod(String name, long mode);

    void unlink(String name);

    Boolean rename(String from, String to);

    Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs);

    Iterable<String> readDir(String name);

    void updateFileSize(JObject<File> file);

    Long size(String f);

    Optional<ByteString> read(String fileUuid, long offset, int length);

    Long write(String fileUuid, long offset, ByteString data);

    default Long write(String fileUuid, long offset, byte[] data) {
        return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
    }

    Boolean truncate(String fileUuid, long length);

    String readlink(String uuid);

    ByteString readlinkBS(String uuid);

    String symlink(String oldpath, String newpath);
}
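A hypothetical caller of the interface above (only the signatures come from the deleted interface; the service instance, path, and error handling are assumptions for illustration):

import com.google.protobuf.ByteString;

class FileServiceUsageSketch {
    static void demo(DhfsFileService fileService) {
        String uuid = fileService.create("/hello.txt", 0644)  // illustrative path/mode
                .orElseThrow();                                // Optional<String> file uuid/ino
        fileService.write(uuid, 0, "hi".getBytes());           // default byte[] overload
        ByteString back = fileService.read(uuid, 0, 2).orElseThrow();
        Long size = fileService.size(uuid);                    // 2 after the write above
    }
}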
@@ -1,814 +0,0 @@
|
||||
package com.usatiuk.dhfs.files.service;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import com.google.protobuf.UnsafeByteOperations;
|
||||
import com.usatiuk.dhfs.files.objects.ChunkData;
|
||||
import com.usatiuk.dhfs.files.objects.File;
|
||||
import com.usatiuk.dhfs.files.objects.FsNode;
|
||||
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
|
||||
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
|
||||
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
|
||||
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
|
||||
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
|
||||
import com.usatiuk.dhfs.objects.jrepository.JMutator;
|
||||
import com.usatiuk.dhfs.objects.jrepository.JObject;
|
||||
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
|
||||
import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
|
||||
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
|
||||
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.StartupEvent;
|
||||
import jakarta.annotation.Priority;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.enterprise.event.Observes;
|
||||
import jakarta.inject.Inject;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Path;
|
||||
import java.util.*;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
@ApplicationScoped
|
||||
public class DhfsFileServiceImpl implements DhfsFileService {
|
||||
@Inject
|
||||
JObjectManager jObjectManager;
|
||||
@Inject
|
||||
JObjectTxManager jObjectTxManager;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.target_chunk_size")
|
||||
int targetChunkSize;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.write_merge_threshold")
|
||||
float writeMergeThreshold;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
|
||||
float writeMergeMaxChunkToTake;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.write_merge_limit")
|
||||
float writeMergeLimit;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
|
||||
float writeLastChunkLimit;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
|
||||
boolean useHashForChunks;
|
||||
|
||||
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
|
||||
boolean allowRecursiveDelete;
|
||||
|
||||
@ConfigProperty(name = "dhfs.objects.ref_verification")
|
||||
boolean refVerification;
|
||||
|
||||
@ConfigProperty(name = "dhfs.objects.write_log")
|
||||
boolean writeLogging;
|
||||
|
||||
@Inject
|
||||
PersistentPeerDataService persistentPeerDataService;
|
||||
@Inject
|
||||
JKleppmannTreeManager jKleppmannTreeManager;
|
||||
|
||||
private JKleppmannTreeManager.JKleppmannTree _tree;
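
    // Chunk naming: with use_hash_for_chunks the ChunkData id is derived from
    // the bytes (content-addressed, so identical chunks deduplicate); otherwise
    // each chunk gets a fresh peer-unique id.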
    private ChunkData createChunk(ByteString bytes) {
        if (useHashForChunks) {
            return new ChunkData(bytes);
        } else {
            return new ChunkData(bytes, persistentPeerDataService.getUniqueId());
        }
    }

    void init(@Observes @Priority(500) StartupEvent event) {
        Log.info("Initializing file service");
        _tree = jKleppmannTreeManager.getTree("fs");
    }
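
    // Resolves a filesystem path to its tree node by walking the Kleppmann
    // tree one path component at a time.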
    private JObject<JKleppmannTreeNode> getDirEntry(String name) {
        var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
        if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
        var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
        if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class))
            throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but is not a tree node jObject: " + name));
        return (JObject<JKleppmannTreeNode>) ret;
    }

    private Optional<JObject<JKleppmannTreeNode>> getDirEntryOpt(String name) {
        var res = _tree.traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
        if (res == null) return Optional.empty();
        var ret = jObjectManager.get(res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
        if (!ret.getMeta().getKnownClass().equals(JKleppmannTreeNode.class))
            throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but is not a tree node jObject: " + name));
        return Optional.of((JObject<JKleppmannTreeNode>) ret);
    }

    @Override
    public Optional<GetattrRes> getattr(String uuid) {
        return jObjectTxManager.executeTx(() -> {
            var ref = jObjectManager.get(uuid);
            if (ref.isEmpty()) return Optional.empty();
            return ref.get().runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
                GetattrRes ret;
                if (d instanceof File f) {
                    ret = new GetattrRes(f.getMtime(), f.getCtime(), f.getMode(), f.isSymlink() ? GetattrType.SYMLINK : GetattrType.FILE);
                } else if (d instanceof JKleppmannTreeNode) {
                    ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
                } else {
                    throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Not a file or directory: " + m.getName()));
                }
                return Optional.of(ret);
            });
        });
    }

    @Override
    public Optional<String> open(String name) {
        return jObjectTxManager.executeTx(() -> {
            try {
                var ret = getDirEntry(name);
                return Optional.of(ret.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f) return f.getFileIno();
                    else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return m.getName();
                    throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Not a file or directory: " + m.getName()));
                }));
            } catch (StatusRuntimeException e) {
                if (e.getStatus().getCode() == Status.Code.NOT_FOUND) {
                    return Optional.empty();
                }
                throw e;
            }
        });
    }

    private void ensureDir(JObject<JKleppmannTreeNode> entry) {
        entry.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
            if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f)
                throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription(m.getName() + " is a file, not a directory"));
            else if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f) return null;
            throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Not a file or directory: " + m.getName()));
        });
    }

    @Override
    public Optional<String> create(String name, long mode) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(name);
            var parent = getDirEntry(path.getParent().toString());

            ensureDir(parent);

            String fname = path.getFileName().toString();

            var fuuid = UUID.randomUUID();
            Log.debug("Creating file " + fuuid);
            File f = new File(fuuid, mode, false);

            var newNodeId = _tree.getNewNodeId();
            var fobj = jObjectManager.putLocked(f, Optional.of(newNodeId));
            try {
                _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
            } catch (Exception e) {
                fobj.getMeta().removeRef(newNodeId);
                throw e;
            } finally {
                fobj.rwUnlock();
            }
            return Optional.of(f.getName());
        });
    }

    //FIXME: Slow..
    @Override
    public Pair<String, String> inoToParent(String ino) {
        return jObjectTxManager.executeTx(() -> {
            return _tree.findParent(w -> {
                if (w.getNode().getMeta() instanceof JKleppmannTreeNodeMetaFile f)
                    if (f.getFileIno().equals(ino))
                        return true;
                return false;
            });
        });
    }

    @Override
    public void mkdir(String name, long mode) {
        jObjectTxManager.executeTx(() -> {
            Path path = Path.of(name);
            var parent = getDirEntry(path.getParent().toString());
            ensureDir(parent);

            String dname = path.getFileName().toString();

            Log.debug("Creating directory " + name);

            _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaDirectory(dname), _tree.getNewNodeId());
        });
    }

    @Override
    public void unlink(String name) {
        jObjectTxManager.executeTx(() -> {
            var node = getDirEntryOpt(name).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
            JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
                if (d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory f)
                    if (!d.getNode().getChildren().isEmpty()) throw new DirectoryNotEmptyException();
                return d.getNode().getMeta();
            });

            _tree.trash(meta, node.getMeta().getName());
        });
    }

    @Override
    public Boolean rename(String from, String to) {
        return jObjectTxManager.executeTx(() -> {
            var node = getDirEntry(from);
            JKleppmannTreeNodeMeta meta = node.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> d.getNode().getMeta());

            var toPath = Path.of(to);
            var toDentry = getDirEntry(toPath.getParent().toString());
            ensureDir(toDentry);

            _tree.move(toDentry.getMeta().getName(), meta.withName(toPath.getFileName().toString()), node.getMeta().getName());

            return true;
        });
    }

    @Override
    public Boolean chmod(String uuid, long mode) {
        return jObjectTxManager.executeTx(() -> {
            var dent = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));

            dent.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d, bump, i) -> {
                if (d instanceof JKleppmannTreeNode) {
                    return null;//FIXME:?
                } else if (d instanceof File f) {
                    bump.apply();
                    f.setMtime(System.currentTimeMillis());
                    f.setMode(mode);
                } else {
                    throw new IllegalArgumentException(uuid + " is not a file");
                }
                return null;
            });

            return true;
        });
    }

    @Override
    public Iterable<String> readDir(String name) {
        return jObjectTxManager.executeTx(() -> {
            var found = getDirEntry(name);

            return found.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
                if (!(d instanceof JKleppmannTreeNode) || !(d.getNode().getMeta() instanceof JKleppmannTreeNodeMetaDirectory)) {
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
                }
                return new ArrayList<>(d.getNode().getChildren().keySet());
            });
        });
    }

    @Override
    public Optional<ByteString> read(String fileUuid, long offset, int length) {
        return jObjectTxManager.executeTx(() -> {
            if (length < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length cannot be negative: " + length));
            if (offset < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset cannot be negative: " + offset));

            var fileOpt = jObjectManager.get(fileUuid);
            if (fileOpt.isEmpty()) {
                Log.error("File not found when trying to read: " + fileUuid);
                return Optional.empty();
            }
            var file = fileOpt.get();
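
            // Chunks live in a NavigableMap keyed by their offset in the file;
            // floorKey(offset) picks the last chunk starting at or before the
            // requested offset, then the loop walks forward concatenating data.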
            try {
                return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
                    if (!(fileData instanceof File)) {
                        throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
                    }
                    var chunksAll = ((File) fileData).getChunks();
                    if (chunksAll.isEmpty()) {
                        return Optional.of(ByteString.empty());
                    }
                    var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet();

                    if (chunksList.isEmpty()) {
                        return Optional.of(ByteString.empty());
                    }

                    var chunks = chunksList.iterator();
                    ByteString buf = ByteString.empty();

                    long curPos = offset;
                    var chunk = chunks.next();

                    while (curPos < offset + length) {
                        var chunkPos = chunk.getKey();

                        long offInChunk = curPos - chunkPos;

                        long toReadInChunk = (offset + length) - curPos;

                        var chunkBytes = readChunk(chunk.getValue());

                        long readableLen = chunkBytes.size() - offInChunk;

                        var toReadReally = Math.min(readableLen, toReadInChunk);

                        if (toReadReally < 0) break;

                        buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));

                        curPos += toReadReally;

                        if (readableLen > toReadInChunk)
                            break;

                        if (!chunks.hasNext()) break;

                        chunk = chunks.next();
                    }

                    // FIXME:
                    return Optional.of(buf);
                });
            } catch (Exception e) {
                Log.error("Error reading file: " + fileUuid, e);
                return Optional.empty();
            }
        });
    }

    private ByteString readChunk(String uuid) {
        var chunkRead = jObjectManager.get(uuid).orElse(null);

        if (chunkRead == null) {
            Log.error("Chunk requested not found: " + uuid);
            throw new StatusRuntimeException(Status.NOT_FOUND);
        }

        return chunkRead.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, d) -> {
            if (!(d instanceof ChunkData cd))
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
            return cd.getBytes();
        });
    }

    private int getChunkSize(String uuid) {
        return readChunk(uuid).size();
    }

    private void cleanupChunks(File f, Collection<String> uuids) {
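        // Drop this file's back-reference from chunks that fell out of its chunk
        // map; with hashed (deduplicated) chunks, skip ids the file still uses.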
        // FIXME:
        var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet();
        for (var cuuid : uuids) {
            try {
                if (inFile.contains(cuuid)) continue;
                jObjectManager.get(cuuid)
                        .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
                                (m, d, b, v) -> {
                                    m.removeRef(f.getName());
                                    return null;
                                }));
            } catch (Exception e) {
                Log.error("Error when cleaning chunk " + cuuid, e);
            }
        }
    }

    @Override
    public Long write(String fileUuid, long offset, ByteString data) {
        return jObjectTxManager.executeTx(() -> {
            if (offset < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset cannot be negative: " + offset));

            // FIXME:
            var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
            if (file == null) {
                Log.error("File not found when trying to write: " + fileUuid);
                return -1L;
            }

            file.rwLockNoCopy();
            try {
                file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
                // FIXME:
                if (!(file.getData() instanceof File))
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

                if (writeLogging) {
                    Log.info("Writing to file: " + file.getMeta().getName() + " size=" + size(fileUuid) + " "
                            + offset + " " + data.size());
                }

                if (size(fileUuid) < offset)
                    truncate(fileUuid, offset);

                // FIXME: Some kind of immutable interface?
                var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
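
                // first = chunk containing the write start, last = chunk containing
                // the last written byte; everything between them gets replaced.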
                var first = chunksAll.floorEntry(offset);
                var last = chunksAll.lowerEntry(offset + data.size());
                NavigableMap<Long, String> removedChunks = new TreeMap<>();

                long start = 0;

                NavigableMap<Long, String> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
                NavigableMap<Long, String> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();

                if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
                    beforeFirst = chunksAll;
                    afterLast = Collections.emptyNavigableMap();
                    first = null;
                    last = null;
                    start = offset;
                } else if (!chunksAll.isEmpty()) {
                    var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
                    removedChunks.putAll(between);
                    start = first.getKey();
                }

                ByteString pendingWrites = ByteString.empty();

                if (first != null && first.getKey() < offset) {
                    var chunkBytes = readChunk(first.getValue());
                    pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey())));
                }
                pendingWrites = pendingWrites.concat(data);

                if (last != null) {
                    var lchunkBytes = readChunk(last.getValue());
                    if (last.getKey() + lchunkBytes.size() > offset + data.size()) {
                        var startInFile = offset + data.size();
                        var startInChunk = startInFile - last.getKey();
                        pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
                    }
                }

                int combinedSize = pendingWrites.size();
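
                // Merge small writes with neighbouring chunks (bounded by the
                // write_merge_* thresholds) to avoid accumulating tiny chunks.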
                if (targetChunkSize > 0) {
                    if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
                        boolean leftDone = false;
                        boolean rightDone = false;
                        while (!leftDone && !rightDone) {
                            if (beforeFirst.isEmpty()) leftDone = true;
                            if (!beforeFirst.isEmpty() && !leftDone) {
                                var takeLeft = beforeFirst.lastEntry();

                                var cuuid = takeLeft.getValue();

                                if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
                                    leftDone = true;
                                    continue;
                                }

                                if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
                                    leftDone = true;
                                    continue;
                                }

                                // FIXME: (and test this)
                                beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
                                start = takeLeft.getKey();
                                pendingWrites = readChunk(cuuid).concat(pendingWrites);
                                combinedSize += getChunkSize(cuuid);
                                removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
                            }
                            if (afterLast.isEmpty()) rightDone = true;
                            if (!afterLast.isEmpty() && !rightDone) {
                                var takeRight = afterLast.firstEntry();

                                var cuuid = takeRight.getValue();

                                if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
                                    rightDone = true;
                                    continue;
                                }

                                if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
                                    rightDone = true;
                                    continue;
                                }

                                // FIXME: (and test this)
                                afterLast = afterLast.tailMap(takeRight.getKey(), false);
                                pendingWrites = pendingWrites.concat(readChunk(cuuid));
                                combinedSize += getChunkSize(cuuid);
                                removedChunks.put(takeRight.getKey(), takeRight.getValue());
                            }
                        }
                    }
                }

                NavigableMap<Long, String> newChunks = new TreeMap<>();
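
                // Re-split the combined buffer into chunks of roughly
                // targetChunkSize bytes.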
                {
                    int cur = 0;
                    while (cur < combinedSize) {
                        int end;

                        if (targetChunkSize <= 0)
                            end = combinedSize;
                        else {
                            if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
                                end = Math.min(cur + targetChunkSize, combinedSize);
                            } else {
                                end = combinedSize;
                            }
                        }

                        var thisChunk = pendingWrites.substring(cur, end);

                        ChunkData newChunkData = createChunk(thisChunk);
                        //FIXME:
                        jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
                        newChunks.put(start, newChunkData.getName());

                        start += thisChunk.size();
                        cur = end;
                    }
                }

                file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));

                cleanupChunks(file.getData(), removedChunks.values());
                updateFileSize((JObject<File>) file);
            } finally {
                file.rwUnlock();
            }

            return (long) data.size();
        });
    }

    @Override
    public Boolean truncate(String fileUuid, long length) {
        return jObjectTxManager.executeTx(() -> {
            if (length < 0)
                throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length cannot be negative: " + length));

            var file = (JObject<File>) jObjectManager.get(fileUuid).orElse(null);
            if (file == null) {
                Log.error("File not found when trying to truncate: " + fileUuid);
                return false;
            }

            if (length == 0) {
                file.rwLockNoCopy();
                try {
                    file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);

                    var oldChunks = Collections.unmodifiableNavigableMap(new TreeMap<>(file.getData().getChunks()));

                    file.mutate(new JMutator<>() {
                        long oldMtime;

                        @Override
                        public boolean mutate(File object) {
                            oldMtime = object.getMtime();
                            object.getChunks().clear();
                            return true;
                        }

                        @Override
                        public void revert(File object) {
                            object.setMtime(oldMtime);
                            object.getChunks().putAll(oldChunks);
                        }
                    });
                    cleanupChunks(file.getData(), oldChunks.values());
                    updateFileSize((JObject<File>) file);
                } catch (Exception e) {
                    Log.error("Error writing file chunks: " + fileUuid, e);
                    return false;
                } finally {
                    file.rwUnlock();
                }
                return true;
            }

            file.rwLockNoCopy();
            try {
                file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);

                var curSize = size(fileUuid);
                if (curSize == length) return true;

                var chunksAll = Collections.unmodifiableNavigableMap(file.getData().getChunks());
                NavigableMap<Long, String> removedChunks = new TreeMap<>();
                NavigableMap<Long, String> newChunks = new TreeMap<>();

                if (curSize < length) {
                    long combinedSize = (length - curSize);

                    long start = curSize;

                    // Cache zero-filled buffers so equally sized gap chunks share one allocation
                    HashMap<Long, ByteString> zeroCache = new HashMap<>();

                    {
                        long cur = 0;
                        while (cur < combinedSize) {
                            long end;

                            if (targetChunkSize <= 0)
                                end = combinedSize;
                            else {
                                if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
                                    end = cur + targetChunkSize;
                                } else {
                                    end = combinedSize;
                                }
                            }

                            if (!zeroCache.containsKey(end - cur))
                                zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));

                            ChunkData newChunkData = createChunk(zeroCache.get(end - cur));
                            //FIXME:
                            jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
                            newChunks.put(start, newChunkData.getName());

                            start += newChunkData.getSize();
                            cur = end;
                        }
                    }
                } else {
                    var tail = chunksAll.lowerEntry(length);
                    var afterTail = chunksAll.tailMap(tail.getKey(), false);

                    removedChunks.put(tail.getKey(), tail.getValue());
                    removedChunks.putAll(afterTail);

                    var tailBytes = readChunk(tail.getValue());
                    var newChunk = tailBytes.substring(0, (int) (length - tail.getKey()));

                    ChunkData newChunkData = createChunk(newChunk);
                    //FIXME:
                    jObjectManager.put(newChunkData, Optional.of(file.getMeta().getName()));
                    newChunks.put(tail.getKey(), newChunkData.getName());
                }

                file.mutate(new FileChunkMutator(file.getData().getMtime(), System.currentTimeMillis(), removedChunks, newChunks));

                cleanupChunks(file.getData(), removedChunks.values());
                updateFileSize((JObject<File>) file);
                return true;
            } catch (Exception e) {
                Log.error("Error truncating file: " + fileUuid, e);
                return false;
            } finally {
                file.rwUnlock();
            }
        });
    }

    @Override
    public String readlink(String uuid) {
        return jObjectTxManager.executeTx(() -> {
            return readlinkBS(uuid).toStringUtf8();
        });
    }

    @Override
    public ByteString readlinkBS(String uuid) {
        return jObjectTxManager.executeTx(() -> {
            var file = jObjectManager.get(uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));

            return file.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (md, fileData) -> {
                if (!(fileData instanceof File)) {
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
                }

                if (!((File) fileData).isSymlink())
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Not a symlink: " + uuid));

                return read(uuid, 0, Math.toIntExact(size(uuid))).get();
            });
        });
    }

    @Override
    public String symlink(String oldpath, String newpath) {
        return jObjectTxManager.executeTx(() -> {
            Path path = Path.of(newpath);
            var parent = getDirEntry(path.getParent().toString());

            ensureDir(parent);

            String fname = path.getFileName().toString();

            var fuuid = UUID.randomUUID();
            Log.debug("Creating symlink " + fuuid);

            File f = new File(fuuid, 0, true);
            var newNodeId = _tree.getNewNodeId();
            ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));

            f.getChunks().put(0L, newChunkData.getName());

            jObjectManager.put(newChunkData, Optional.of(f.getName()));
            var newFile = jObjectManager.putLocked(f, Optional.of(newNodeId));
            try {
                updateFileSize(newFile);
            } finally {
                newFile.rwUnlock();
            }

            _tree.move(parent.getMeta().getName(), new JKleppmannTreeNodeMetaFile(fname, f.getName()), newNodeId);
            return f.getName();
        });
    }

    @Override
    public Boolean setTimes(String fileUuid, long atimeMs, long mtimeMs) {
        return jObjectTxManager.executeTx(() -> {
            var file = jObjectManager.get(fileUuid).orElseThrow(
                    () -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
                            "File not found for setTimes: " + fileUuid))
            );

            file.runWriteLocked(JObjectManager.ResolutionStrategy.REMOTE, (m, fileData, bump, i) -> {
                if (fileData instanceof JKleppmannTreeNode) return null; // FIXME:
                if (!(fileData instanceof FsNode fd))
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

                bump.apply();
                fd.setMtime(mtimeMs);
                return null;
            });

            return true;
        });
    }

    @Override
    public void updateFileSize(JObject<File> file) {
        jObjectTxManager.executeTx(() -> {
            file.rwLockNoCopy();
            try {
                file.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
                if (!(file.getData() instanceof File fd))
                    throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

                long realSize = 0;
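
                // The file size is defined by the last chunk: its start offset
                // plus its length.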
                var last = fd.getChunks().lastEntry();
                if (last != null) {
                    var lastSize = getChunkSize(last.getValue());
                    realSize = last.getKey() + lastSize;
                }

                if (realSize != fd.getSize()) {
                    long finalRealSize = realSize;
                    file.mutate(new JMutator<File>() {
                        long oldSize;

                        @Override
                        public boolean mutate(File object) {
                            oldSize = object.getSize();
                            object.setSize(finalRealSize);
                            return true;
                        }

                        @Override
                        public void revert(File object) {
                            object.setSize(oldSize);
                        }
                    });
                }
            } catch (Exception e) {
                Log.error("Error updating file size: " + file.getMeta().getName(), e);
            } finally {
                file.rwUnlock();
            }
        });
    }

    @Override
    public Long size(String uuid) {
        return jObjectTxManager.executeTx(() -> {
            var read = jObjectManager.get(uuid)
                    .orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));

            try {
                return read.runReadLocked(JObjectManager.ResolutionStrategy.REMOTE, (fsNodeData, fileData) -> {
                    if (!(fileData instanceof File fd))
                        throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

                    return fd.getSize();
                });
            } catch (Exception e) {
                Log.error("Error getting size of file: " + uuid, e);
                return -1L;
            }
        });
    }
}
@@ -1,8 +0,0 @@
package com.usatiuk.dhfs.files.service;

public class DirectoryNotEmptyException extends RuntimeException {
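    // Thrown for ordinary control flow (rmdir of a non-empty directory);
    // skipping fillInStackTrace makes constructing it cheap.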
    @Override
    public synchronized Throwable fillInStackTrace() {
        return this;
    }
}
@@ -1,36 +0,0 @@
package com.usatiuk.dhfs.files.service;

import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.objects.jrepository.JMutator;

import java.util.NavigableMap;

public class FileChunkMutator implements JMutator<File> {
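    // Applies a chunk-map delta (drop removed chunks, add new ones) and can
    // revert it, so a failed transaction rolls the file back cleanly.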
    private final long _oldTime;
    private final long _newTime;
    private final NavigableMap<Long, String> _removedChunks;
    private final NavigableMap<Long, String> _newChunks;

    public FileChunkMutator(long oldTime, long newTime, NavigableMap<Long, String> removedChunks, NavigableMap<Long, String> newChunks) {
        _oldTime = oldTime;
        _newTime = newTime;
        _removedChunks = removedChunks;
        _newChunks = newChunks;
    }

    @Override
    public boolean mutate(File object) {
        object.setMtime(_newTime);
        object.getChunks().keySet().removeAll(_removedChunks.keySet());
        object.getChunks().putAll(_newChunks);
        return true;
    }

    @Override
    public void revert(File object) {
        object.setMtime(_oldTime);
        object.getChunks().keySet().removeAll(_newChunks.keySet());
        object.getChunks().putAll(_removedChunks);
    }

}
@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.files.service;

public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}
@@ -1,7 +0,0 @@
package com.usatiuk.dhfs.files.service;

public enum GetattrType {
    FILE,
    DIRECTORY,
    SYMLINK
}
@@ -1,391 +0,0 @@
package com.usatiuk.dhfs.fuse;

import com.google.protobuf.UnsafeByteOperations;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import ru.serce.jnrfuse.struct.Timespec;

import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Optional;

import static jnr.posix.FileStat.*;

@ApplicationScoped
public class DhfsFuse extends FuseStubFS {
    private static final int blksize = 1048576;
    private static final int iosize = 1048576;
    @Inject
    ObjectPersistentStore persistentStore; // FIXME?
    @ConfigProperty(name = "dhfs.fuse.root")
    String root;
    @ConfigProperty(name = "dhfs.fuse.enabled")
    boolean enabled;
    @ConfigProperty(name = "dhfs.fuse.debug")
    Boolean debug;
    @ConfigProperty(name = "dhfs.files.target_chunk_size")
    int targetChunkSize;
    @Inject
    JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
    @Inject
    DhfsFileService fileService;

    void init(@Observes @Priority(100000) StartupEvent event) {
        if (!enabled) return;
        Paths.get(root).toFile().mkdirs();
        Log.info("Mounting with root " + root);

        var uid = new UnixSystem().getUid();
        var gid = new UnixSystem().getGid();

        var opts = new ArrayList<String>();

        // Assuming macFUSE
        if (SystemUtils.IS_OS_MAC) {
            opts.add("-o");
            opts.add("iosize=" + iosize);
        } else if (SystemUtils.IS_OS_LINUX) {
            // FIXME: There's something else missing: the writes still seem to be 32k max
            // opts.add("-o");
            // opts.add("large_read");
            opts.add("-o");
            opts.add("big_writes");
            opts.add("-o");
            opts.add("max_read=" + iosize);
            opts.add("-o");
            opts.add("max_write=" + iosize);
        }
        opts.add("-o");
        opts.add("auto_cache");
        opts.add("-o");
        opts.add("uid=" + uid);
        opts.add("-o");
        opts.add("gid=" + gid);

        mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
    }

    void shutdown(@Observes @Priority(1) ShutdownEvent event) {
        if (!enabled) return;
        Log.info("Unmounting");
        umount();
        Log.info("Unmounted");
    }

    @Override
    public int statfs(String path, Statvfs stbuf) {
        try {
            stbuf.f_frsize.set(blksize);
            stbuf.f_bsize.set(blksize);
            stbuf.f_blocks.set(persistentStore.getTotalSpace() / blksize); // total data blocks in file system
            stbuf.f_bfree.set(persistentStore.getFreeSpace() / blksize); // free blocks in fs
            stbuf.f_bavail.set(persistentStore.getUsableSpace() / blksize); // avail blocks in fs
            stbuf.f_files.set(1000); //FIXME:
            stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME:
            stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME:
            stbuf.f_namemax.set(2048);
            return super.statfs(path, stbuf);
        } catch (Exception e) {
            Log.error("When statfs " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int getattr(String path, FileStat stat) {
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var uuid = fileOpt.get();
            Optional<GetattrRes> found = fileService.getattr(uuid);
            if (found.isEmpty()) {
                return -ErrorCodes.ENOENT();
            }
            switch (found.get().type()) {
                case FILE -> {
                    stat.st_mode.set(S_IFREG | found.get().mode());
                    stat.st_nlink.set(1);
                    stat.st_size.set(fileService.size(uuid));
                }
                case DIRECTORY -> {
                    stat.st_mode.set(S_IFDIR | found.get().mode());
                    stat.st_nlink.set(2);
                }
                case SYMLINK -> {
                    stat.st_mode.set(S_IFLNK | 0777);
                    stat.st_nlink.set(1);
                    stat.st_size.set(fileService.size(uuid));
                }
            }

            // FIXME: Race?
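            // Times are stored in milliseconds; split into the seconds and
            // nanoseconds the timespec fields expect (atime is not tracked
            // separately, so mtime is reported for it).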
            stat.st_ctim.tv_sec.set(found.get().ctime() / 1000);
            stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000);
            stat.st_mtim.tv_sec.set(found.get().mtime() / 1000);
            stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
            stat.st_atim.tv_sec.set(found.get().mtime() / 1000);
            stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
            stat.st_blksize.set(blksize);
        } catch (Throwable e) {
            Log.error("When getattr " + path, e);
            return -ErrorCodes.EIO();
        }
        return 0;
    }

    @Override
    public int utimens(String path, Timespec[] timespec) {
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var file = fileOpt.get();
            var res = fileService.setTimes(file,
                    timespec[0].tv_sec.get() * 1000,
                    timespec[1].tv_sec.get() * 1000);
            if (!res) return -ErrorCodes.EINVAL();
            else return 0;
        } catch (Exception e) {
            Log.error("When utimens " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int open(String path, FuseFileInfo fi) {
        try {
            if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT();
            return 0;
        } catch (Exception e) {
            Log.error("When open " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
        if (size < 0) return -ErrorCodes.EINVAL();
        if (offset < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var file = fileOpt.get();
            var read = fileService.read(file, offset, (int) size);
            if (read.isEmpty()) return 0;
            UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
            return read.get().size();
        } catch (Exception e) {
            Log.error("When reading " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
        if (offset < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);

            jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
                    buf.address(),
                    jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
                    size
            );

            var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
            return written.intValue();
        } catch (Exception e) {
            Log.error("When writing " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int create(String path, long mode, FuseFileInfo fi) {
        try {
            var ret = fileService.create(path, mode);
            if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
            else return 0;
        } catch (Exception e) {
            Log.error("When creating " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int mkdir(String path, long mode) {
        try {
            fileService.mkdir(path, mode);
            return 0;
        } catch (AlreadyExistsException aex) {
            return -ErrorCodes.EEXIST();
        } catch (Exception e) {
            Log.error("When creating dir " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int rmdir(String path) {
        try {
            fileService.unlink(path);
            return 0;
        } catch (DirectoryNotEmptyException ex) {
            return -ErrorCodes.ENOTEMPTY();
        } catch (Exception e) {
            Log.error("When removing dir " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int rename(String path, String newName) {
        try {
            var ret = fileService.rename(path, newName);
            if (!ret) return -ErrorCodes.ENOENT();
            else return 0;
        } catch (Exception e) {
            Log.error("When renaming " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int unlink(String path) {
        try {
            fileService.unlink(path);
            return 0;
        } catch (Exception e) {
            Log.error("When unlinking " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int truncate(String path, long size) {
        if (size < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var file = fileOpt.get();
            var ok = fileService.truncate(file, size);
            if (ok)
                return 0;
            else
                return -ErrorCodes.ENOSPC();
        } catch (Exception e) {
            Log.error("When truncating " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int chmod(String path, long mode) {
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var ret = fileService.chmod(fileOpt.get(), mode);
            if (ret) return 0;
            else return -ErrorCodes.EINVAL();
        } catch (Exception e) {
            Log.error("When chmod " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) {
        try {
            Iterable<String> found;
            try {
                found = fileService.readDir(path);
            } catch (StatusRuntimeException e) {
                if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
                    return -ErrorCodes.ENOENT();
                else throw e;
            }

            filler.apply(buf, ".", null, 0);
            filler.apply(buf, "..", null, 0);

            for (var c : found) {
                filler.apply(buf, c, null, 0);
            }

            return 0;
        } catch (Exception e) {
            Log.error("When readdir " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int readlink(String path, Pointer buf, long size) {
        if (size < 0) return -ErrorCodes.EINVAL();
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            var file = fileOpt.get();
            var read = fileService.readlinkBS(file);
            if (read.isEmpty()) return 0;
            UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
            buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
            return 0;
        } catch (Exception e) {
            Log.error("When readlink " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int chown(String path, long uid, long gid) {
        try {
            var fileOpt = fileService.open(path);
            if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
            return 0;
        } catch (Exception e) {
            Log.error("When chown " + path, e);
            return -ErrorCodes.EIO();
        }
    }

    @Override
    public int symlink(String oldpath, String newpath) {
        try {
            var ret = fileService.symlink(oldpath, newpath);
            if (ret == null) return -ErrorCodes.EEXIST();
            else return 0;
        } catch (Exception e) {
            Log.error("When creating " + newpath, e);
            return -ErrorCodes.EIO();
        }
    }
}
@@ -1,64 +0,0 @@
package com.usatiuk.dhfs.fuse;

import com.google.protobuf.ByteOutput;
import jnr.ffi.Pointer;

import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;

public class JnrPtrByteOutput extends ByteOutput {
    private final Pointer _backing;
    private final long _size;
    private final JnrPtrByteOutputAccessors _accessors;
    private long _pos;

    public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
        _backing = backing;
        _size = size;
        _pos = 0;
        _accessors = accessors;
    }

    @Override
    public void write(byte value) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void write(byte[] value, int offset, int length) {
        if (length + _pos > _size) throw new IndexOutOfBoundsException();
        _backing.put(_pos, value, offset, length);
        _pos += length;
    }

    @Override
    public void writeLazy(byte[] value, int offset, int length) {
        if (length + _pos > _size) throw new IndexOutOfBoundsException();
        _backing.put(_pos, value, offset, length);
        _pos += length;
    }

    @Override
    public void write(ByteBuffer value) {
        var rem = value.remaining();
        if (rem + _pos > _size) throw new IndexOutOfBoundsException();
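
        // Direct buffers are copied straight from their native address into the
        // FUSE-supplied pointer via Unsafe, avoiding an intermediate heap array.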
        if (value.isDirect()) {
            if (value instanceof MappedByteBuffer mb) {
                mb.load();
            }
            long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
            var out = _backing.address() + _pos;
            _accessors.getUnsafe().copyMemory(addr, out, rem);
        } else {
            throw new UnsupportedOperationException();
        }

        _pos += rem;
    }

    @Override
    public void writeLazy(ByteBuffer value) {
        write(value);
    }
}
@@ -1,24 +0,0 @@
package com.usatiuk.dhfs.fuse;

import jakarta.inject.Singleton;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import lombok.Getter;
import sun.misc.Unsafe;

import java.lang.reflect.Field;

@Singleton
class JnrPtrByteOutputAccessors {
    @Getter
    JavaNioAccess _nioAccess;
    @Getter
    Unsafe _unsafe;
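
    // Grabs JDK internals once at startup: JavaNioAccess to obtain direct
    // buffer addresses and Unsafe for raw memory copies.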
    JnrPtrByteOutputAccessors() throws NoSuchFieldException, IllegalAccessException {
        _nioAccess = SharedSecrets.getJavaNioAccess();
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        _unsafe = (Unsafe) f.get(null);
    }
}
@@ -1,566 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree;

import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.*;
import com.usatiuk.dhfs.objects.jrepository.*;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.objects.repository.opsupport.Op;
import com.usatiuk.dhfs.objects.repository.opsupport.OpObject;
import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
import com.usatiuk.dhfs.objects.repository.opsupport.OpSender;
import com.usatiuk.kleppmanntree.*;
import com.usatiuk.dhfs.utils.VoidFn;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

@ApplicationScoped
public class JKleppmannTreeManager {
    private static final String dataFileName = "trees";
    private final ConcurrentHashMap<String, JKleppmannTree> _trees = new ConcurrentHashMap<>();
    @Inject
    JKleppmannTreePeerInterface jKleppmannTreePeerInterface;
    @Inject
    OpSender opSender;
    @Inject
    OpObjectRegistry opObjectRegistry;
    @Inject
    JObjectManager jObjectManager;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    JObjectTxManager jObjectTxManager;
    @Inject
    SoftJObjectFactory softJObjectFactory;
    @Inject
    JKleppmannTreePeerInterface peerInterface;

    public JKleppmannTree getTree(String name) {
        return _trees.computeIfAbsent(name, this::createTree);
    }

    private JKleppmannTree createTree(String name) {
        return jObjectTxManager.executeTx(() -> {
            var data = jObjectManager.get(JKleppmannTreePersistentData.nameFromTreeName(name)).orElse(null);
            if (data == null) {
                data = jObjectManager.put(new JKleppmannTreePersistentData(name), Optional.empty());
            }
            var tree = new JKleppmannTree(name);
            opObjectRegistry.registerObject(tree);
            return tree;
        });
    }

    public class JKleppmannTree implements OpObject {
        private final KleppmannTree<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> _tree;

        private final SoftJObject<JKleppmannTreePersistentData> _persistentData;

        private final JKleppmannTreeStorageInterface _storageInterface;
        private final JKleppmannTreeClock _clock;

        private final String _treeName;

        JKleppmannTree(String treeName) {
            _treeName = treeName;

            _persistentData = softJObjectFactory.create(JKleppmannTreePersistentData.class, JKleppmannTreePersistentData.nameFromTreeName(treeName));

            _storageInterface = new JKleppmannTreeStorageInterface();
            _clock = new JKleppmannTreeClock();

            _tree = new KleppmannTree<>(_storageInterface, peerInterface, _clock, new JOpRecorder());
        }

        public String traverse(List<String> names) {
            return _tree.traverse(names);
        }

        public String getNewNodeId() {
            return _storageInterface.getNewNodeId();
        }

        public void move(String newParent, JKleppmannTreeNodeMeta newMeta, String node) {
            _tree.move(newParent, newMeta, node);
        }

        public void trash(JKleppmannTreeNodeMeta newMeta, String node) {
            _tree.move(_storageInterface.getTrashId(), newMeta.withName(node), node);
        }

        @Override
        public boolean hasPendingOpsForHost(UUID host) {
            return _persistentData.get()
                    .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
                            (m, d) -> d.getQueues().containsKey(host) &&
                                    !d.getQueues().get(host).isEmpty()
                    );
        }

        @Override
        public List<Op> getPendingOpsForHost(UUID host, int limit) {
            return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                if (d.getQueues().containsKey(host)) {
                    var queue = d.getQueues().get(host);
                    ArrayList<Op> collected = new ArrayList<>();

                    for (var node : queue.entrySet()) {
                        collected.add(new JKleppmannTreeOpWrapper(node.getValue()));
                        if (collected.size() >= limit) break;
                    }

                    return collected;
                }
                return List.of();
            });
        }

        @Override
        public String getId() {
            return _treeName;
        }

        @Override
        public void commitOpForHost(UUID host, Op op) {
            if (!(op instanceof JKleppmannTreeOpWrapper jop))
                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());
            _persistentData.get().assertRwLock();
            _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);

            var got = _persistentData.get().getData().getQueues().get(host).firstEntry().getValue();
            if (!Objects.equals(jop.getOp(), got))
                throw new IllegalArgumentException("Committed op push was not the oldest");

            _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                @Override
                public boolean mutate(JKleppmannTreePersistentData object) {
                    object.getQueues().get(host).pollFirstEntry();
                    return true;
                }

                @Override
                public void revert(JKleppmannTreePersistentData object) {
                    object.getQueues().get(host).put(jop.getOp().timestamp(), jop.getOp());
                }
            });
        }

        @Override
        public void pushBootstrap(UUID host) {
            _tree.recordBoostrapFor(host);
        }

        public Pair<String, String> findParent(Function<JKleppmannTreeNodeWrapper, Boolean> predicate) {
            return _tree.findParent(predicate);
        }

        @Override
        public boolean acceptExternalOp(UUID from, Op op) {
            if (op instanceof JKleppmannTreePeriodicPushOp pushOp) {
                return _tree.updateExternalTimestamp(pushOp.getFrom(), pushOp.getTimestamp());
            }

            if (!(op instanceof JKleppmannTreeOpWrapper jop))
                throw new IllegalArgumentException("Invalid incoming op type for JKleppmannTree: " + op.getClass() + " " + getId());

            JObject<?> fileRef;
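            // An op that creates a file carries its File id; materialize the
            // object up-front so the reference exists while the op is applied.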
            if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
                var fino = f.getFileIno();
                fileRef = jObjectManager.getOrPut(fino, File.class, Optional.of(jop.getOp().childId()));
            } else {
                fileRef = null;
            }

            if (Log.isTraceEnabled())
                Log.trace("Received op from " + from + ": " + jop.getOp().timestamp().timestamp() + " " + jop.getOp().childId() + "->" + jop.getOp().newParentId() + " as " + jop.getOp().newMeta().getName());

            try {
                _tree.applyExternalOp(from, jop.getOp());
            } catch (Exception e) {
                Log.error("Error applying external op", e);
                throw e;
            } finally {
                // FIXME:
                // Fixup the ref if it didn't really get applied

                if ((fileRef == null) && (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile))
                    Log.error("Could not create child of pushed op: " + jop.getOp());

                if (jop.getOp().newMeta() instanceof JKleppmannTreeNodeMetaFile f) {
                    if (fileRef != null) {
                        var got = jObjectManager.get(jop.getOp().childId()).orElse(null);

                        VoidFn remove = () -> {
                            fileRef.runWriteLockedVoid(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
                                m.removeRef(jop.getOp().childId());
                            });
                        };

                        if (got == null) {
                            remove.apply();
                        } else {
                            try {
                                got.rLock();
                                try {
                                    got.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                                    if (got.getData() == null || !got.getData().extractRefs().contains(f.getFileIno()))
                                        remove.apply();
                                } finally {
                                    got.rUnlock();
                                }
                            } catch (DeletedObjectAccessException dex) {
                                remove.apply();
                            }
                        }
                    }
                }
            }
            return true;
        }

        @Override
        public Op getPeriodicPushOp() {
            return new JKleppmannTreePeriodicPushOp(persistentPeerDataService.getSelfUuid(), _clock.peekTimestamp());
        }

        @Override
        public void addToTx() {
            // FIXME: a hack
            _persistentData.get().rwLockNoCopy();
            _persistentData.get().rwUnlock();
        }

        private class JOpRecorder implements OpRecorder<Long, UUID, JKleppmannTreeNodeMeta, String> {
            @Override
            public void recordOp(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                var hostUuids = persistentPeerDataService.getHostUuids().stream().toList();
                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                    @Override
                    public boolean mutate(JKleppmannTreePersistentData object) {
                        object.recordOp(hostUuids, op);
                        return true;
                    }

                    @Override
                    public void revert(JKleppmannTreePersistentData object) {
                        object.removeOp(hostUuids, op);
                    }
                });
                opSender.push(JKleppmannTree.this);
            }

            @Override
            public void recordOpForPeer(UUID peer, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                    @Override
                    public boolean mutate(JKleppmannTreePersistentData object) {
                        object.recordOp(peer, op);
                        return true;
                    }

                    @Override
                    public void revert(JKleppmannTreePersistentData object) {
                        object.removeOp(peer, op);
                    }
                });
                opSender.push(JKleppmannTree.this);
            }
        }

        private class JKleppmannTreeClock implements Clock<Long> {
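            // Lamport-style clock: getTimestamp bumps the persisted counter via a
            // JMutator, so the increment is transactional and can be reverted.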
|
||||
@Override
|
||||
public Long getTimestamp() {
|
||||
_persistentData.get().assertRwLock();
|
||||
_persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
|
||||
var ret = _persistentData.get().getData().getClock().peekTimestamp() + 1;
|
||||
_persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
|
||||
@Override
|
||||
public boolean mutate(JKleppmannTreePersistentData object) {
|
||||
object.getClock().getTimestamp();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revert(JKleppmannTreePersistentData object) {
|
||||
object.getClock().ungetTimestamp();
|
||||
}
|
||||
});
|
||||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long peekTimestamp() {
|
||||
return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getClock().peekTimestamp());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long updateTimestamp(Long receivedTimestamp) {
|
||||
_persistentData.get().assertRwLock();
|
||||
_persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
|
||||
_persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
|
||||
Long _old;
|
||||
|
||||
@Override
|
||||
public boolean mutate(JKleppmannTreePersistentData object) {
|
||||
_old = object.getClock().updateTimestamp(receivedTimestamp);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revert(JKleppmannTreePersistentData object) {
|
||||
object.getClock().setTimestamp(_old);
|
||||
}
|
||||
});
|
||||
return _persistentData.get().getData().getClock().peekTimestamp();
|
||||
}
|
||||
}
|
||||
    public class JKleppmannTreeStorageInterface implements StorageInterface<Long, UUID, JKleppmannTreeNodeMeta, String, JKleppmannTreeNodeWrapper> {
        private final LogWrapper _logWrapper = new LogWrapper();
        private final PeerLogWrapper _peerLogWrapper = new PeerLogWrapper();

        public JKleppmannTreeStorageInterface() {
            if (jObjectManager.get(getRootId()).isEmpty()) {
                putNode(new JKleppmannTreeNode(new TreeNode<>(getRootId(), null, new JKleppmannTreeNodeMetaDirectory(""))));
                putNode(new JKleppmannTreeNode(new TreeNode<>(getTrashId(), null, null)));
            }
        }

        public JObject<JKleppmannTreeNode> putNode(JKleppmannTreeNode node) {
            return jObjectManager.put(node, Optional.ofNullable(node.getNode().getParent()));
        }

        public JObject<JKleppmannTreeNode> putNodeLocked(JKleppmannTreeNode node) {
            return jObjectManager.putLocked(node, Optional.ofNullable(node.getNode().getParent()));
        }

        @Override
        public String getRootId() {
            return _treeName + "_jt_root";
        }

        @Override
        public String getTrashId() {
            return _treeName + "_jt_trash";
        }

        @Override
        public String getNewNodeId() {
            return persistentPeerDataService.getUniqueId();
        }

        @Override
        public JKleppmannTreeNodeWrapper getById(String id) {
            var got = jObjectManager.get(id);
            if (got.isEmpty()) return null;
            return new JKleppmannTreeNodeWrapper((JObject<JKleppmannTreeNode>) got.get());
        }

        @Override
        public JKleppmannTreeNodeWrapper createNewNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
            return new JKleppmannTreeNodeWrapper(putNodeLocked(new JKleppmannTreeNode(node)));
        }

        @Override
        public void removeNode(String id) {}

        @Override
        public LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> getLog() {
            return _logWrapper;
        }

        @Override
        public PeerTimestampLogInterface<Long, UUID> getPeerTimestampLog() {
            return _peerLogWrapper;
        }

        @Override
        public void rLock() {
            _persistentData.get().rLock();
        }

        @Override
        public void rUnlock() {
            _persistentData.get().rUnlock();
        }

        @Override
        public void rwLock() {
            _persistentData.get().rwLockNoCopy();
        }

        @Override
        public void rwUnlock() {
            _persistentData.get().rwUnlock();
        }

        @Override
        public void assertRwLock() {
            _persistentData.get().assertRwLock();
        }
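        // Persists the newest op timestamp seen from each peer; presumably
        // used to decide how far the shared op log can be safely trimmed.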
        private class PeerLogWrapper implements PeerTimestampLogInterface<Long, UUID> {

            @Override
            public Long getForPeer(UUID peerId) {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
                        (m, d) -> d.getPeerTimestampLog().get(peerId));
            }

            @Override
            public void putForPeer(UUID peerId, Long timestamp) {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                    Long old;

                    @Override
                    public boolean mutate(JKleppmannTreePersistentData object) {
                        old = object.getPeerTimestampLog().put(peerId, timestamp);
                        return !Objects.equals(old, timestamp);
                    }

                    @Override
                    public void revert(JKleppmannTreePersistentData object) {
                        if (old != null)
                            object.getPeerTimestampLog().put(peerId, old);
                        else
                            object.getPeerTimestampLog().remove(peerId, timestamp);
                    }
                });
            }
        }
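        // Exposes the persisted op log (a TreeMap keyed by CombinedTimestamp)
        // as the tree algorithm's LogInterface; writes go through revertible
        // JMutators, reads through runReadLocked.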
        private class LogWrapper implements LogInterface<Long, UUID, JKleppmannTreeNodeMeta, String> {
            @Override
            public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekOldest() {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    var ret = d.getLog().firstEntry();
                    if (ret == null) return null;
                    return Pair.of(ret);
                });
            }

            @Override
            public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> takeOldest() {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);

                var ret = _persistentData.get().getData().getLog().firstEntry();
                if (ret != null)
                    _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                        @Override
                        public boolean mutate(JKleppmannTreePersistentData object) {
                            object.getLog().pollFirstEntry();
                            return true;
                        }

                        @Override
                        public void revert(JKleppmannTreePersistentData object) {
                            object.getLog().put(ret.getKey(), ret.getValue());
                        }
                    });
                return Pair.of(ret);
            }

            @Override
            public Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> peekNewest() {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    var ret = d.getLog().lastEntry();
                    if (ret == null) return null;
                    return Pair.of(ret);
                });
            }

            @Override
            public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> newestSlice(CombinedTimestamp<Long, UUID> since, boolean inclusive) {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    var tail = d.getLog().tailMap(since, inclusive);
                    return tail.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                });
            }

            @Override
            public List<Pair<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>> getAll() {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    return d.getLog().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
                });
            }

            @Override
            public boolean isEmpty() {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    return d.getLog().isEmpty();
                });
            }

            @Override
            public boolean containsKey(CombinedTimestamp<Long, UUID> timestamp) {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    return d.getLog().containsKey(timestamp);
                });
            }

            @Override
            public long size() {
                return _persistentData.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    return (long) d.getLog().size();
                });
            }

            @Override
            public void put(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                if (_persistentData.get().getData().getLog().containsKey(timestamp))
                    throw new IllegalStateException("Overwriting log entry?");
                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                    @Override
                    public boolean mutate(JKleppmannTreePersistentData object) {
                        object.getLog().put(timestamp, record);
                        return true;
                    }

                    @Override
                    public void revert(JKleppmannTreePersistentData object) {
                        object.getLog().remove(timestamp, record);
                    }
                });
            }

            @Override
            public void replace(CombinedTimestamp<Long, UUID> timestamp, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> record) {
                _persistentData.get().assertRwLock();
                _persistentData.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                _persistentData.get().mutate(new JMutator<JKleppmannTreePersistentData>() {
                    LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String> old;

                    @Override
                    public boolean mutate(JKleppmannTreePersistentData object) {
                        old = object.getLog().put(timestamp, record);
                        return !Objects.equals(old, record);
                    }

                    @Override
                    public void revert(JKleppmannTreePersistentData object) {
                        if (old != null)
                            object.getLog().put(timestamp, old);
                        else
                            object.getLog().remove(timestamp, record);
                    }
                });
            }
        }
    }
}
}
@@ -1,71 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree;

import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.kleppmanntree.TreeNode;
import com.usatiuk.kleppmanntree.TreeNodeWrapper;

import java.util.UUID;

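// Bridges a JObject-backed tree node into the kleppmann-tree TreeNodeWrapper
// interface. freeze()/unfreeze() appear to pin the node's metadata so the
// reference GC leaves it alone while the tree algorithm holds it.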
public class JKleppmannTreeNodeWrapper implements TreeNodeWrapper<Long, UUID, JKleppmannTreeNodeMeta, String> {
    private final JObject<JKleppmannTreeNode> _backing;

    public JKleppmannTreeNodeWrapper(JObject<JKleppmannTreeNode> backing) {_backing = backing;}

    @Override
    public void rLock() {
        _backing.rLock();
    }

    @Override
    public void rUnlock() {
        _backing.rUnlock();
    }

    @Override
    public void rwLock() {
        _backing.rwLock();
    }

    @Override
    public void rwUnlock() {
        _backing.bumpVer(); // FIXME:?
        _backing.rwUnlock();
    }

    @Override
    public void freeze() {
        _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
            m.freeze();
            return null;
        });
    }

    @Override
    public void unfreeze() {
        _backing.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
            m.unfreeze();
            return null;
        });
    }

    @Override
    public void notifyRef(String id) {
        _backing.getMeta().addRef(id);
    }

    @Override
    public void notifyRmRef(String id) {
        _backing.getMeta().removeRef(id);
    }

    @Override
    public TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> getNode() {
        _backing.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
        if (_backing.getData() == null)
            throw new IllegalStateException("Node " + _backing.getMeta().getName() + " data lost!");
        return _backing.getData().getNode();
    }
}
@@ -1,30 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree;

import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.objects.repository.opsupport.Op;
import com.usatiuk.kleppmanntree.OpMove;
import lombok.Getter;

import java.util.Collection;
import java.util.List;
import java.util.UUID;

// Wrapper to avoid having to specify generic types
public class JKleppmannTreeOpWrapper implements Op {
    @Getter
    private final OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> _op;

    public JKleppmannTreeOpWrapper(OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> op) {
        if (op == null) throw new IllegalArgumentException("op shouldn't be null");
        _op = op;
    }

    @Override
    public Collection<String> getEscapedRefs() {
        if (_op.newMeta() instanceof JKleppmannTreeNodeMetaFile mf) {
            return List.of(mf.getFileIno());
        }
        return List.of();
    }
}
@@ -1,25 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree;

import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.kleppmanntree.PeerInterface;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.Collection;
import java.util.UUID;

@Singleton
public class JKleppmannTreePeerInterface implements PeerInterface<UUID> {
    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Override
    public UUID getSelfId() {
        return persistentPeerDataService.getSelfUuid();
    }

    @Override
    public Collection<UUID> getAllPeers() {
        return persistentPeerDataService.getHostUuidsAndSelf();
    }
}
@@ -1,25 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree;

import com.usatiuk.dhfs.objects.repository.opsupport.Op;
import lombok.Getter;

import java.util.Collection;
import java.util.List;
import java.util.UUID;

public class JKleppmannTreePeriodicPushOp implements Op {
    @Getter
    private final UUID _from;
    @Getter
    private final long _timestamp;

    public JKleppmannTreePeriodicPushOp(UUID from, long timestamp) {
        _from = from;
        _timestamp = timestamp;
    }

    @Override
    public Collection<String> getEscapedRefs() {
        return List.of();
    }
}
@@ -1,53 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
import com.usatiuk.kleppmanntree.LogEffect;
import com.usatiuk.kleppmanntree.LogEffectOld;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.UUID;

@Singleton
public class JKleppmannTreeLogEffectSerializer implements ProtoSerializer<JKleppmannTreeOpLogEffectP, LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String>> {
    @Inject
    ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
    @Inject
    ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;

    @Override
    public LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String> deserialize(JKleppmannTreeOpLogEffectP message) {
        return new LogEffect<>(
                message.hasOldParent() ? new LogEffectOld<>(
                        opProtoSerializer.deserialize(message.getOldEffectiveMove()).getOp(),
                        message.getOldParent(),
                        metaProtoSerializer.deserialize(message.getOldMeta())
                ) : null,
                opProtoSerializer.deserialize(message.getEffectiveOp()).getOp(),
                message.getNewParentId(),
                metaProtoSerializer.deserialize(message.getNewMeta()),
                message.getSelfId()
        );
    }

    @Override
    public JKleppmannTreeOpLogEffectP serialize(LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String> object) {
        var builder = JKleppmannTreeOpLogEffectP.newBuilder();
        // FIXME: all these wrappers
        if (object.oldInfo() != null) {
            builder.setOldEffectiveMove(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.oldInfo().oldEffectiveMove())));
            builder.setOldParent(object.oldInfo().oldParent());
            builder.setOldMeta(metaProtoSerializer.serialize(object.oldInfo().oldMeta()));
        }
        builder.setEffectiveOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.effectiveOp())));
        builder.setNewParentId(object.newParentId());
        builder.setNewMeta(metaProtoSerializer.serialize(object.newMeta()));
        builder.setSelfId(object.childId());
        return builder.build();
    }
}
@@ -1,56 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
import com.usatiuk.kleppmanntree.TreeNode;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.HashMap;
import java.util.UUID;

@Singleton
public class JKleppmannTreeNodeProtoSerializer implements ProtoSerializer<JKleppmannTreeNodeP, JKleppmannTreeNode> {
    @Inject
    ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;
    @Inject
    ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;

    @Override
    public JKleppmannTreeNode deserialize(JKleppmannTreeNodeP message) {
        var children = new HashMap<String, String>();
        message.getChildrenList().forEach(child -> children.put(child.getKey(), child.getValue()));
        var node = new TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String>(
                message.getId(),
                message.hasParent() ? message.getParent() : null,
                message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
                children
        );
        if (message.hasLastEffectiveOp())
            node.setLastEffectiveOp((opProtoSerializer.deserialize(message.getLastEffectiveOp())).getOp());
        return new JKleppmannTreeNode(node);
    }

    @Override
    public JKleppmannTreeNodeP serialize(JKleppmannTreeNode object) {
        var builder = JKleppmannTreeNodeP.newBuilder().setId(object.getNode().getId());
        if (object.getNode().getParent() != null)
            builder.setParent(object.getNode().getParent());
        if (object.getNode().getMeta() != null) {
            builder.setMeta(metaProtoSerializer.serialize(object.getNode().getMeta()));
        }
        if (object.getNode().getLastEffectiveOp() != null)
            builder.setLastEffectiveOp(
                    opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(object.getNode().getLastEffectiveOp()))
            );
        object.getNode().getChildren().forEach((k, v) -> {
            builder.addChildrenBuilder().setKey(k).setValue(v);
        });
        return builder.build();
    }
}
@@ -1,40 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
import com.usatiuk.kleppmanntree.CombinedTimestamp;
import com.usatiuk.kleppmanntree.OpMove;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.UUID;

@Singleton
public class JKleppmannTreeOpProtoSerializer implements ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> {
    @Inject
    ProtoSerializer<JKleppmannTreeNodeMetaP, JKleppmannTreeNodeMeta> metaProtoSerializer;

    @Override
    public JKleppmannTreeOpWrapper deserialize(JKleppmannTreeOpP message) {
        return new JKleppmannTreeOpWrapper(new OpMove<>(
                new CombinedTimestamp<>(message.getTimestamp(), UUID.fromString(message.getPeer())), message.getNewParentId(),
                message.hasMeta() ? metaProtoSerializer.deserialize(message.getMeta()) : null,
                message.getChild()
        ));
    }

    @Override
    public JKleppmannTreeOpP serialize(JKleppmannTreeOpWrapper object) {
        var builder = JKleppmannTreeOpP.newBuilder();
        builder.setTimestamp(object.getOp().timestamp().timestamp())
                .setPeer(object.getOp().timestamp().nodeId().toString())
                .setNewParentId(object.getOp().newParentId())
                .setChild(object.getOp().childId());
        if (object.getOp().newMeta() != null)
            builder.setMeta(metaProtoSerializer.serialize(object.getOp().newMeta()));
        return builder.build();
    }
}
@@ -1,22 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreePeriodicPushOp;
import com.usatiuk.dhfs.objects.repository.JKleppmannTreePeriodicPushOpP;
import jakarta.inject.Singleton;

import java.util.UUID;

@Singleton
public class JKleppmannTreePeriodicPushOpProtoSerializer implements ProtoSerializer<JKleppmannTreePeriodicPushOpP, JKleppmannTreePeriodicPushOp> {

    @Override
    public JKleppmannTreePeriodicPushOp deserialize(JKleppmannTreePeriodicPushOpP message) {
        return new JKleppmannTreePeriodicPushOp(UUID.fromString(message.getFromUuid()), message.getTimestamp());
    }

    @Override
    public JKleppmannTreePeriodicPushOpP serialize(JKleppmannTreePeriodicPushOp object) {
        return JKleppmannTreePeriodicPushOpP.newBuilder().setTimestamp(object.getTimestamp()).setFromUuid(object.getFrom().toString()).build();
    }
}
@@ -1,86 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.serializers;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeOpWrapper;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreePersistentData;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpLogEffectP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeOpP;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreePersistentDataP;
import com.usatiuk.kleppmanntree.*;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;

import java.util.HashMap;
import java.util.TreeMap;
import java.util.UUID;

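// Round-trips the whole per-tree persistent state (clock value, per-peer op
// queues, peer timestamp log and op log) to its protobuf representation.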
@Singleton
public class JKleppmannTreePersistentDataProtoSerializer implements ProtoSerializer<JKleppmannTreePersistentDataP, JKleppmannTreePersistentData> {
    @Inject
    ProtoSerializer<JKleppmannTreeOpP, JKleppmannTreeOpWrapper> opProtoSerializer;
    @Inject
    ProtoSerializer<JKleppmannTreeOpLogEffectP, LogEffect<Long, UUID, JKleppmannTreeNodeMeta, String>> effectProtoSerializer;

    @Override
    public JKleppmannTreePersistentData deserialize(JKleppmannTreePersistentDataP message) {
        HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues = new HashMap<>();

        for (var q : message.getQueuesList()) {
            var qmap = new TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>();
            for (var o : q.getEntriesList()) {
                var op = (JKleppmannTreeOpWrapper) opProtoSerializer.deserialize(o.getOp());
                qmap.put(new CombinedTimestamp<>(o.getClock(), UUID.fromString(o.getUuid())), op.getOp());
            }
            queues.put(UUID.fromString(q.getNode()), qmap);
        }

        var log = new HashMap<UUID, Long>();

        for (var l : message.getPeerLogList()) {
            log.put(UUID.fromString(l.getHost()), l.getTimestamp());
        }

        var opLog = new TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>>();
        for (var l : message.getOpLogList()) {
            opLog.put(new CombinedTimestamp<>(l.getClock(), UUID.fromString(l.getUuid())),
                    new LogRecord<>(opProtoSerializer.deserialize(l.getOp()).getOp(), l.getEffectsList().stream().map(effectProtoSerializer::deserialize).toList())
            );
        }

        return new JKleppmannTreePersistentData(
                message.getTreeName(),
                new AtomicClock(message.getClock()),
                queues,
                log,
                opLog
        );
    }

    @Override
    public JKleppmannTreePersistentDataP serialize(JKleppmannTreePersistentData object) {
        var builder = JKleppmannTreePersistentDataP.newBuilder()
                .setTreeName(object.getTreeName())
                .setClock(object.getClock().peekTimestamp());
        for (var q : object.getQueues().entrySet()) {
            if (q.getValue().isEmpty()) continue;
            var qb = builder.addQueuesBuilder();
            qb.setNode(q.getKey().toString());
            for (var e : q.getValue().entrySet()) {
                qb.addEntriesBuilder().setClock(e.getKey().timestamp()).setUuid(e.getKey().nodeId().toString())
                        .setOp((JKleppmannTreeOpP) opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue())));
            }
        }
        for (var peerLogEntry : object.getPeerTimestampLog().entrySet()) {
            builder.addPeerLogBuilder().setHost(peerLogEntry.getKey().toString()).setTimestamp(peerLogEntry.getValue());
        }
        for (var e : object.getLog().entrySet()) {
            builder.addOpLogBuilder()
                    .setClock(e.getKey().timestamp())
                    .setUuid(e.getKey().nodeId().toString())
                    .setOp(opProtoSerializer.serialize(new JKleppmannTreeOpWrapper(e.getValue().op())))
                    .addAllEffects(e.getValue().effects().stream().map(effectProtoSerializer::serialize).toList());
        }
        return builder.build();
    }
}
@@ -1,45 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.structs;

import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.kleppmanntree.TreeNode;
import lombok.Getter;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.UUID;

// FIXME: Ideally this is two classes?
@OnlyLocal
public class JKleppmannTreeNode extends JObjectData {
    @Getter
    final TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> _node;

    public JKleppmannTreeNode(TreeNode<Long, UUID, JKleppmannTreeNodeMeta, String> node) {
        _node = node;
    }

    @Override
    public String getName() {
        return _node.getId();
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return null;
    }

    @Override
    public Collection<String> extractRefs() {
        if (_node.getMeta() instanceof JKleppmannTreeNodeMetaFile)
            return List.of(((JKleppmannTreeNodeMetaFile) _node.getMeta()).getFileIno());
        return Collections.unmodifiableCollection(_node.getChildren().values());
    }

    @Override
    public Class<? extends JObjectData> getRefType() {
        return JObjectData.class;
    }
}
@@ -1,31 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.structs;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaP;
import com.usatiuk.kleppmanntree.NodeMeta;
import lombok.Getter;

import java.util.Objects;

@ProtoMirror(JKleppmannTreeNodeMetaP.class)
public abstract class JKleppmannTreeNodeMeta implements NodeMeta {
    @Getter
    private final String _name;

    public JKleppmannTreeNodeMeta(String name) {_name = name;}

    public abstract JKleppmannTreeNodeMeta withName(String name);

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        JKleppmannTreeNodeMeta that = (JKleppmannTreeNodeMeta) o;
        return Objects.equals(_name, that._name);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(_name);
    }
}
@@ -1,16 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.structs;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaDirectoryP;

@ProtoMirror(JKleppmannTreeNodeMetaDirectoryP.class)
public class JKleppmannTreeNodeMetaDirectory extends JKleppmannTreeNodeMeta {
    public JKleppmannTreeNodeMetaDirectory(String name) {
        super(name);
    }

    @Override
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaDirectory(name);
    }
}
@@ -1,37 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.structs;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.dhfs.objects.persistence.JKleppmannTreeNodeMetaFileP;
import lombok.Getter;

import java.util.Objects;

@ProtoMirror(JKleppmannTreeNodeMetaFileP.class)
public class JKleppmannTreeNodeMetaFile extends JKleppmannTreeNodeMeta {
    @Getter
    private final String _fileIno;

    public JKleppmannTreeNodeMetaFile(String name, String fileIno) {
        super(name);
        _fileIno = fileIno;
    }

    @Override
    public JKleppmannTreeNodeMeta withName(String name) {
        return new JKleppmannTreeNodeMetaFile(name, _fileIno);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        if (!super.equals(o)) return false;
        JKleppmannTreeNodeMetaFile that = (JKleppmannTreeNodeMetaFile) o;
        return Objects.equals(_fileIno, that._fileIno);
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), _fileIno);
    }
}
@@ -1,88 +0,0 @@
package com.usatiuk.dhfs.objects.jkleppmanntree.structs;

import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.kleppmanntree.AtomicClock;
import com.usatiuk.kleppmanntree.CombinedTimestamp;
import com.usatiuk.kleppmanntree.LogRecord;
import com.usatiuk.kleppmanntree.OpMove;
import lombok.Getter;

import java.util.*;

@OnlyLocal
public class JKleppmannTreePersistentData extends JObjectData {
    private final String _treeName;
    @Getter
    private final AtomicClock _clock;
    @Getter
    private final HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> _queues;
    @Getter
    private final HashMap<UUID, Long> _peerTimestampLog;
    @Getter
    private final TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> _log;

    public JKleppmannTreePersistentData(String treeName, AtomicClock clock,
                                        HashMap<UUID, TreeMap<CombinedTimestamp<Long, UUID>, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String>>> queues,
                                        HashMap<UUID, Long> peerTimestampLog, TreeMap<CombinedTimestamp<Long, UUID>, LogRecord<Long, UUID, JKleppmannTreeNodeMeta, String>> log) {
        _treeName = treeName;
        _clock = clock;
        _queues = queues;
        _peerTimestampLog = peerTimestampLog;
        _log = log;
    }

    public JKleppmannTreePersistentData(String treeName) {
        _treeName = treeName;
        _clock = new AtomicClock(1);
        _queues = new HashMap<>();
        _peerTimestampLog = new HashMap<>();
        _log = new TreeMap<>();
    }

    public static String nameFromTreeName(String treeName) {
        return treeName + "_pd";
    }

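    // Per-host queues of ops that still need to be delivered to that peer,
    // keyed by the op's CombinedTimestamp so they replay in order.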
    public void recordOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
        _queues.computeIfAbsent(host, h -> new TreeMap<>());
        _queues.get(host).put(opMove.timestamp(), opMove);
    }

    public void removeOp(UUID host, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
        _queues.get(host).remove(opMove.timestamp(), opMove);
    }

    public void recordOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
        for (var u : hosts) {
            recordOp(u, opMove);
        }
    }

    public void removeOp(Collection<UUID> hosts, OpMove<Long, UUID, JKleppmannTreeNodeMeta, String> opMove) {
        for (var u : hosts) {
            removeOp(u, opMove);
        }
    }


    @Override
    public String getName() {
        return nameFromTreeName(_treeName);
    }

    public String getTreeName() {
        return _treeName;
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return null;
    }

    @Override
    public Collection<String> extractRefs() {
        return List.of();
    }
}
@@ -1,11 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AssumedUnique {
}
@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

public class DeletedObjectAccessException extends RuntimeException {
}
@@ -1,7 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

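// Contract: mutate() applies a change and reports whether the object actually
// changed; revert() must restore the exact prior state, which lets a failed
// transaction roll back without reloading the object.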
public interface JMutator<T extends JObjectData> {
    boolean mutate(T object);

    void revert(T object);
}
@@ -1,87 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.dhfs.utils.VoidFn;

public abstract class JObject<T extends JObjectData> {
    public abstract ObjectMetadata getMeta();

    public abstract T getData();

    abstract void rollback(ObjectMetadata meta, JObjectData data);

    public abstract <R> R runReadLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnRead<T, R> fn);

    // Note: this is expensive
    public abstract <R> R runWriteLocked(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWrite<T, R> fn);

    public void runReadLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnReadVoid<T> fn) {
        runReadLocked(resolutionStrategy, (m, d) -> {
            fn.apply(m, d);
            return null;
        });
    }

    public void runWriteLockedVoid(JObjectManager.ResolutionStrategy resolutionStrategy, JObjectManager.ObjectFnWriteVoid<T> fn) {
        runWriteLocked(resolutionStrategy, (m, d, b, v) -> {
            fn.apply(m, d, b, v);
            return null;
        });
    }

    public <X extends JObjectData> JObject<? extends X> as(Class<X> klass) {
        if (klass.isAssignableFrom(getMeta().getKnownClass())) return (JObject<? extends X>) this;
        throw new IllegalStateException("Class mismatch for " + getMeta().getName() + " got: " + getMeta().getKnownClass());
    }

    public JObject<T> local() {
        tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
        if (getData() == null)
            throw new IllegalStateException("Data missing for " + getMeta().getName());
        return this;
    }

    public JObject<T> remote() {
        tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
        if (getData() == null)
            throw new IllegalStateException("Data missing for " + getMeta().getName());
        return this;
    }

    public abstract void mutate(JMutator<? super T> mutator);

    public abstract boolean tryResolve(JObjectManager.ResolutionStrategy resolutionStrategy);

    public abstract void externalResolution(JObjectData data);

    public abstract void rwLock();

    public abstract boolean tryRwLock();

    public abstract void rwLockNoCopy();

    public abstract void rwUnlock();

    public abstract void drop();

    abstract boolean haveRwLock();

    public abstract void assertRwLock();

    public abstract void doDelete();

    public abstract void markSeen();

    public abstract void rLock();

    public abstract void rUnlock();

    public abstract void bumpVer();

    public abstract void commitFence();

    public abstract void commitFenceAsync(VoidFn callback);

    public abstract int estimateSize();

    abstract boolean updateDeletionState();
}
@@ -1,29 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;

import java.util.Collection;
import java.util.List;

@ProtoMirror(JObjectDataP.class)
public abstract class JObjectData {
    public abstract String getName();

    public Class<? extends ConflictResolver> getConflictResolver() {
        throw new UnsupportedOperationException();
    }

    public Class<? extends JObjectData> getRefType() {
        throw new UnsupportedOperationException("This object shouldn't have refs");
    }

    public Collection<String> extractRefs() {
        return List.of();
    }

    public int estimateSize() {
        return 0;
    }
}
@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

public record JObjectKey(short type) {
}
@@ -1,95 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import io.quarkus.logging.Log;
import io.quarkus.runtime.Shutdown;
import io.quarkus.runtime.Startup;
import jakarta.enterprise.context.ApplicationScoped;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.LinkedHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

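// A size-bounded LRU over live JObjects: putLast/pollFirstEntry (JDK 21
// SequencedMap methods on LinkedHashMap) maintain access order, and entries
// are evicted until the estimated total drops below dhfs.objects.lru.limit.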
@ApplicationScoped
public class JObjectLRU {
    private final LinkedHashMap<JObject<?>, Long> _cache = new LinkedHashMap<>();
    @ConfigProperty(name = "dhfs.objects.lru.limit")
    long sizeLimit;
    @ConfigProperty(name = "dhfs.objects.lru.print-stats")
    boolean printStats;
    private long _curSize = 0;
    private long _evict = 0;
    private ExecutorService _statusExecutor = null;

    @Startup
    void init() {
        if (printStats) {
            _statusExecutor = Executors.newSingleThreadExecutor();
            _statusExecutor.submit(() -> {
                try {
                    while (true) {
                        Thread.sleep(10000);
                        if (_curSize > 0)
                            Log.info("Cache status: size="
                                    + _curSize / 1024 / 1024 + "MB"
                                    + " evicted=" + _evict);
                        _evict = 0;
                        if (Log.isTraceEnabled()) {
                            long realSize = 0;
                            synchronized (_cache) {
                                for (JObject<?> object : _cache.keySet()) {
                                    realSize += object.estimateSize();
                                }
                                Log.info("Cache status: real size="
                                        + realSize / 1024 / 1024 + "MB" + " entries=" + _cache.size());
                            }
                        }
                    }
                } catch (InterruptedException ignored) {
                }
            });
        }
    }

    @Shutdown
    void shutdown() {
        if (_statusExecutor != null)
            _statusExecutor.shutdownNow();
    }

    public void notifyAccess(JObject<?> obj) {
        if (obj.getData() == null) return;
        long size = obj.estimateSize();
        synchronized (_cache) {
            _curSize += size;
            var old = _cache.putLast(obj, size);
            if (old != null)
                _curSize -= old;

            while (_curSize >= sizeLimit) {
                var del = _cache.pollFirstEntry();
                _curSize -= del.getValue();
                _evict++;
            }
        }
    }

    public void updateSize(JObject<?> obj) {
        long size = obj.estimateSize();
        synchronized (_cache) {
            var old = _cache.replace(obj, size);
            if (old != null) {
                _curSize += size;
                _curSize -= old;
            } else {
                return;
            }

            while (_curSize >= sizeLimit) {
                var del = _cache.pollFirstEntry();
                _curSize -= del.getValue();
                _evict++;
            }
        }
    }
}
@@ -1,63 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.dhfs.utils.VoidFn;
import jakarta.annotation.Nullable;

import java.util.Collection;
import java.util.Optional;

public interface JObjectManager {
    // FIXME:
    void runWriteListeners(JObject<?> obj, boolean metaChanged, boolean dataChanged);

    <T extends JObjectData> void registerWriteListener(Class<T> klass, WriteListenerFn fn);

    <T extends JObjectData> void registerMetaWriteListener(Class<T> klass, WriteListenerFn fn);

    Optional<JObject<?>> get(String name);

    Collection<String> findAll();

    // Put a new object
    <T extends JObjectData> JObject<T> put(T object, Optional<String> parent);

    <T extends JObjectData> JObject<T> putLocked(T object, Optional<String> parent);

    // Get an object with a name if it exists, otherwise create new one based on metadata
    // Should be used when working with objects referenced from the outside
    JObject<?> getOrPut(String name, Class<? extends JObjectData> klass, Optional<String> parent);

    JObject<?> getOrPutLocked(String name, Class<? extends JObjectData> klass, Optional<String> parent);

    enum ResolutionStrategy {
        NO_RESOLUTION,
        LOCAL_ONLY,
        REMOTE
    }

    @FunctionalInterface
    interface WriteListenerFn {
        void apply(JObject<?> obj);
    }

    @FunctionalInterface
    interface ObjectFnRead<T, R> {
        R apply(ObjectMetadata meta, @Nullable T data);
    }

    @FunctionalInterface
    interface ObjectFnWrite<T, R> {
        R apply(ObjectMetadata indexData, @Nullable T data, VoidFn bump, VoidFn invalidate);
    }

    @FunctionalInterface
    interface ObjectFnReadVoid<T> {
        void apply(ObjectMetadata meta, @Nullable T data);
    }

    @FunctionalInterface
    interface ObjectFnWriteVoid<T> {
        void apply(ObjectMetadata indexData, @Nullable T data, VoidFn bump, VoidFn invalidate);
    }

}
@@ -1,795 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
import com.usatiuk.dhfs.utils.VoidFn;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.Shutdown;
import io.quarkus.runtime.Startup;
import jakarta.inject.Inject;
import jakarta.inject.Singleton;
import lombok.Getter;
import org.apache.commons.collections4.MultiValuedMap;
import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

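// Keeps at most one live in-memory JObject per name: the map holds weak
// references, and a ReferenceQueue plus a cleanup thread prune entries once
// the referent has been collected.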
@Singleton
public class JObjectManagerImpl implements JObjectManager {
    private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _writeListeners
            = new ArrayListValuedHashMap<>();
    private final MultiValuedMap<Class<? extends JObjectData>, WriteListenerFn> _metaWriteListeners
            = new ArrayListValuedHashMap<>();
    private final ConcurrentHashMap<String, NamedWeakReference> _map = new ConcurrentHashMap<>();
    private final ReferenceQueue<JObjectImpl<?>> _refQueue = new ReferenceQueue<>();
    @Inject
    ObjectPersistentStore objectPersistentStore;
    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    JObjectRefProcessor jObjectRefProcessor;
    @Inject
    SoftJObjectFactory softJObjectFactory;
    @Inject
    JObjectLRU jObjectLRU;
    @Inject
    JObjectTxManager jObjectTxManager;
    @Inject
    TxWriteback txWriteback;

    @Inject
    ProtoSerializer<ObjectMetadataP, ObjectMetadata> metaProtoSerializer;
    @Inject
    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;

    @ConfigProperty(name = "dhfs.objects.ref_verification")
    boolean refVerification;
    @ConfigProperty(name = "dhfs.objects.lock_timeout_secs")
    int lockTimeoutSecs;
    private Thread _refCleanupThread;

    @Override
    public void runWriteListeners(JObject<?> obj, boolean metaChanged, boolean dataChanged) {
        if (metaChanged)
            for (var t : _metaWriteListeners.keySet()) { // FIXME:?
                if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
                    for (var cb : _metaWriteListeners.get(t))
                        cb.apply(obj);
            }
        if (dataChanged)
            for (var t : _writeListeners.keySet()) { // FIXME:?
                if (t.isAssignableFrom(obj.getMeta().getKnownClass()))
                    for (var cb : _writeListeners.get(t))
                        cb.apply(obj);
            }
    }

    @Override
    public <T extends JObjectData> void registerWriteListener(Class<T> klass, WriteListenerFn fn) {
        _writeListeners.put(klass, fn);
    }

    @Override
    public <T extends JObjectData> void registerMetaWriteListener(Class<T> klass, WriteListenerFn fn) {
        _metaWriteListeners.put(klass, fn);
    }

    @Startup
    void init() {
        _refCleanupThread = new Thread(this::refCleanupThread);
        _refCleanupThread.setName("JObject ref cleanup thread");
        _refCleanupThread.start();
    }

    @Shutdown
    void shutdown() throws InterruptedException {
        _refCleanupThread.interrupt();
        _refCleanupThread.join();
    }

    private void refCleanupThread() {
        try {
            while (!Thread.interrupted()) {
                NamedWeakReference cur = (NamedWeakReference) _refQueue.remove();
                _map.remove(cur._key, cur);
            }
        } catch (InterruptedException ignored) {
        }
        Log.info("Ref cleanup thread exiting");
    }

    private JObjectImpl<?> getFromMap(String key) {
        var ret = _map.get(key);
        if (ret != null && ret.get() != null) {
            return ret.get();
        }
        return null;
    }

    @Override
    public Optional<JObject<?>> get(String name) {
        {
            var inMap = getFromMap(name);
            if (inMap != null) {
                jObjectLRU.notifyAccess(inMap);
                return Optional.of(inMap);
            }
        }

        ObjectMetadataP readMd;
        try {
            readMd = objectPersistentStore.readObjectMeta(name);
        } catch (StatusRuntimeException ex) {
            if (ex.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
                return Optional.empty();
            throw ex;
        }
        var meta = metaProtoSerializer.deserialize(readMd);
        if (!(meta instanceof ObjectMetadata))
            throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Unexpected metadata type for " + name));

        if (((ObjectMetadata) meta).isDeleted()) {
            Log.warn("Deleted meta on disk for " + name);
            return Optional.empty();
        }

        JObjectImpl<?> ret = null;
        var newObj = new JObjectImpl<>((ObjectMetadata) meta);
        while (ret == null) {
            var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(newObj, _refQueue));
            if (ref.get() == null) _map.remove(name, ref);
            else ret = ref.get();
        }
        jObjectLRU.notifyAccess(ret);
        return Optional.of(ret);
    }

    @Override
    public Collection<String> findAll() {
        var out = _map.values().stream().map(WeakReference::get)
                .filter(Objects::nonNull)
                .map(JObjectImpl::getMeta).map(ObjectMetadata::getName)
                .collect(Collectors.toCollection((Supplier<LinkedHashSet<String>>) LinkedHashSet::new));
        out.addAll(objectPersistentStore.findAllObjects());
        return out;
    }
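    // The retry loops below race against the ref cleanup thread: if the weak
    // reference found via computeIfAbsent has already been cleared, it is
    // removed from the map and the lookup is retried until a live object wins.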
|
||||
public <D extends JObjectData> JObjectImpl<D> putImpl(D object, Optional<String> parent, boolean lock) {
|
||||
while (true) {
|
||||
JObjectImpl<?> ret;
|
||||
JObjectImpl<?> newObj = null;
|
||||
try {
|
||||
ret = getFromMap(object.getName());
|
||||
if (ret != null) {
|
||||
if (!object.getClass().isAnnotationPresent(AssumedUnique.class))
|
||||
throw new IllegalArgumentException("Trying to insert different object with same key");
|
||||
} else {
|
||||
newObj = new JObjectImpl<D>(object.getName(), persistentPeerDataService.getSelfUuid(), object);
|
||||
newObj.rwLock();
|
||||
while (ret == null) {
|
||||
JObjectImpl<?> finalNewObj = newObj;
|
||||
var ref = _map.computeIfAbsent(object.getName(), k -> new NamedWeakReference(finalNewObj, _refQueue));
|
||||
if (ref.get() == null) _map.remove(object.getName(), ref);
|
||||
else ret = ref.get();
|
||||
}
|
||||
if (ret != newObj) {
|
||||
newObj.drop();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
JObjectImpl<D> finalRet = (JObjectImpl<D>) ret;
|
||||
|
||||
boolean shouldWrite = false;
|
||||
try {
|
||||
shouldWrite = ret.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
|
||||
return (object.getClass().isAnnotationPresent(PushResolution.class)
|
||||
&& object.getClass().isAnnotationPresent(AssumedUnique.class)
|
||||
&& finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy())
|
||||
|| (parent.isEmpty() && !m.isFrozen()) || (parent.isPresent() && !m.checkRef(parent.get()));
|
||||
});
|
||||
} catch (DeletedObjectAccessException dex) {
|
||||
shouldWrite = true;
|
||||
}
|
||||
|
||||
if (shouldWrite)
|
||||
ret.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
|
||||
if (object.getClass().isAnnotationPresent(PushResolution.class)
|
||||
&& object.getClass().isAnnotationPresent(AssumedUnique.class)
|
||||
&& finalRet.getData() == null && !finalRet.getMeta().isHaveLocalCopy()) {
|
||||
finalRet.externalResolution(object);
|
||||
}
|
||||
|
||||
if (parent.isPresent()) {
|
||||
m.addRef(parent.get());
|
||||
if (m.isFrozen())
|
||||
m.unfreeze();
|
||||
} else {
|
||||
m.freeze();
|
||||
}
|
||||
|
||||
return null;
|
||||
});
|
||||
} finally {
|
||||
// FIXME?
|
||||
if (newObj != null)
|
||||
newObj.forceInvalidate();
|
||||
}
|
||||
if (newObj == null) {
|
||||
jObjectLRU.notifyAccess(ret);
|
||||
if (lock)
|
||||
ret.rwLock();
|
||||
}
|
||||
if (newObj != null && !lock)
|
||||
newObj.rwUnlock();
|
||||
return (JObjectImpl<D>) ret;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public <D extends JObjectData> JObjectImpl<D> putLocked(D object, Optional<String> parent) {
|
||||
return putImpl(object, parent, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <D extends JObjectData> JObjectImpl<D> put(D object, Optional<String> parent) {
|
||||
return putImpl(object, parent, false);
|
||||
}
|
||||
|
||||
public JObject<?> getOrPutImpl(String name, Class<? extends JObjectData> klass, Optional<String> parent, boolean lock) {
|
||||
        while (true) {
            var got = get(name).orElse(null);

            if (got != null) {
                {
                    boolean shouldWrite = false;
                    try {
                        // These two mutate in one direction only, it's ok to not take the lock
                        var gotKlass = got.getMeta().getKnownClass();
                        var gotSeen = got.getMeta().isSeen();
                        shouldWrite
                                = !(((gotKlass.equals(klass))
                                || (klass.isAssignableFrom(gotKlass)))
                                && gotSeen);
                    } catch (DeletedObjectAccessException dex) {
                        shouldWrite = true;
                    }
                    if (shouldWrite || lock) {
                        got.rwLock();
                        try {
                            var meta = got.getMeta();
                            meta.narrowClass(klass);
                            meta.markSeen();
                        } finally {
                            if (!lock) got.rwUnlock();
                        }
                    }
                }

                parent.ifPresent(s -> {
                    boolean shouldWrite = false;
                    try {
                        shouldWrite = !got.runReadLocked(ResolutionStrategy.NO_RESOLUTION, (m, d) -> m.checkRef(s));
                    } catch (DeletedObjectAccessException dex) {
                        shouldWrite = true;
                    }

                    if (!shouldWrite) return;

                    got.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
                        if (m.isFrozen())
                            m.unfreeze();
                        m.addRef(s);
                        return true;
                    });
                });
                return got;
            }

            JObjectImpl<?> ret = null;
            var created = new JObjectImpl<>(new ObjectMetadata(name, false, klass));
            created.rwLock();
            while (ret == null) {
                var ref = _map.computeIfAbsent(name, k -> new NamedWeakReference(created, _refQueue));
                if (ref.get() == null) _map.remove(name, ref);
                else ret = ref.get();
            }
            if (ret != created) {
                created.drop();
                continue;
            }

            created.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
                parent.ifPresent(m::addRef);
                m.markSeen();
                return null;
            });
            if (!lock)
                created.rwUnlock();
            return created;
        }
    }

    @Override
    public JObject<?> getOrPutLocked(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
        return getOrPutImpl(name, klass, parent, true);
    }

    @Override
    public JObject<?> getOrPut(String name, Class<? extends JObjectData> klass, Optional<String> parent) {
        return getOrPutImpl(name, klass, parent, false);
    }

    private static class NamedWeakReference extends WeakReference<JObjectImpl<?>> {
        @Getter
        final String _key;

        public NamedWeakReference(JObjectImpl<?> target, ReferenceQueue<JObjectImpl<?>> q) {
            super(target, q);
            this._key = target.getMeta().getName();
        }
    }

    public class JObjectImpl<T extends JObjectData> extends JObject<T> {
        private final ReentrantReadWriteLock _lock = new ReentrantReadWriteLock();
        private final AtomicReference<T> _dataPart = new AtomicReference<>();
        private ObjectMetadata _metaPart;

        // Create a new object
        protected JObjectImpl(String name, UUID selfUuid, T obj) {
            _metaPart = new ObjectMetadata(name, false, obj.getClass());
            _metaPart.setHaveLocalCopy(true);
            _dataPart.set(obj);
            _metaPart.getChangelog().put(selfUuid, 1L);
            if (Log.isTraceEnabled())
                Log.trace("new JObject: " + getMeta().getName());
        }

        // Create an object from existing metadata
        protected JObjectImpl(ObjectMetadata objectMetadata) {
            _metaPart = objectMetadata;
            if (Log.isTraceEnabled())
                Log.trace("new JObject (ext): " + getMeta().getName());
        }

        @Override
        public T getData() {
            return _dataPart.get();
        }

        @Override
        void rollback(ObjectMetadata meta, JObjectData data) {
            _metaPart = meta;
            _dataPart.set((T) data);
        }

        @Override
        public ObjectMetadata getMeta() {
            return _metaPart;
        }

        @Override
        public void markSeen() {
            if (!_metaPart.isSeen()) {
                runWriteLocked(ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
                    m.markSeen();
                    return null;
                });
            }
        }

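        // Data is resolved lazily: tryLocalResolve double-checks _dataPart under
        // the read lock and loads from the local store, while tryRemoteResolve
        // takes the write lock and falls back to fetching the data from peers.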
        private void tryRemoteResolve() {
            if (_dataPart.get() == null) {
                rwLock();
                try {
                    tryLocalResolve();
                    if (_dataPart.get() == null) {
                        var res = resolveDataRemote();
                        _metaPart.narrowClass(res.getClass());
                        _dataPart.set((T) res);
                        _metaPart.setHaveLocalCopy(true);
                        hydrateRefs();
                    } // _dataPart.get() == null
                } finally {
                    rwUnlock();
                } // try
            } // _dataPart.get() == null
        }

        private void tryLocalResolve() {
            if (_dataPart.get() == null) {
                rLock();
                try {
                    if (_dataPart.get() == null) {
                        if (!getMeta().isHaveLocalCopy()) return;
                        JObjectData res;
                        try {
                            res = resolveDataLocal();
                        } catch (Exception e) {
                            Log.error("Object " + _metaPart.getName() + " data couldn't be read but it should exist locally!", e);
                            return;
                        }

                        if (_metaPart.getSavedRefs() != null && !_metaPart.getSavedRefs().isEmpty())
                            throw new IllegalStateException("Object " + _metaPart.getName() + " has non-hydrated refs when written locally");

                        _metaPart.narrowClass(res.getClass());
                        if (_dataPart.compareAndSet(null, (T) res))
                            onResolution();
                    } // _dataPart.get() == null
                } finally {
                    rUnlock();
                } // try
            } // _dataPart.get() == null
        }

        @Override
        public void externalResolution(JObjectData data) {
            assertRwLock();
            if (Log.isTraceEnabled())
                Log.trace("External resolution of " + getMeta().getName());
            if (_dataPart.get() != null)
                throw new IllegalStateException("Data is not null when recording external resolution of " + getMeta().getName());
            if (!data.getClass().isAnnotationPresent(PushResolution.class))
                throw new IllegalStateException("Expected external resolution only for classes with @PushResolution " + getMeta().getName());
            _metaPart.narrowClass(data.getClass());
            _dataPart.set((T) data);
            _metaPart.setHaveLocalCopy(true);
            hydrateRefs();
        }

        public boolean tryRLock() {
            try {
                if (!_lock.readLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
                    return false;
                if (_metaPart.isDeleted()) {
                    _lock.readLock().unlock();
                    throw new DeletedObjectAccessException();
                }
                return true;
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        }

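        // Acquiring the write lock doubles as transaction registration: the first
        // hold of the lock adds the object to the current transaction, optionally
        // snapshotting a full copy (txCopy) for rollback.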
        boolean tryRwLockImpl(boolean block, boolean txCopy) {
            try {
                if (block) {
                    if (!_lock.writeLock().tryLock(lockTimeoutSecs, TimeUnit.SECONDS))
                        return false;
                } else {
                    if (!_lock.writeLock().tryLock())
                        return false;
                }
                try {
                    // TODO: Fix putImpl
                    // if (_metaPart.isDeleted())
                    //     throw new DeletedObjectAccessException();

                    if (_lock.writeLock().getHoldCount() == 1) {
                        jObjectTxManager.addToTx(this, txCopy);
                    }
                } catch (Throwable t) {
                    _lock.writeLock().unlock();
                    throw t;
                }
                return true;
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public void rwLock() {
            if (!tryRwLockImpl(true, true))
                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
        }

        @Override
        public boolean tryRwLock() {
            return tryRwLockImpl(false, true);
        }

        @Override
        public void rwLockNoCopy() {
            if (!tryRwLockImpl(true, false))
                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire write lock for " + getMeta().getName()));
        }

        public void rLock() {
            if (!tryRLock())
                throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Failed to acquire read lock for " + getMeta().getName()));
        }

        public void rUnlock() {
            _lock.readLock().unlock();
        }

        protected void forceInvalidate() {
            assertRwLock();
            jObjectTxManager.forceInvalidate(this);
        }

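        // A hold count of 2 before unlocking means the caller is releasing its own
        // hold while the transaction still keeps the one taken in addToTx, so this
        // is the point where the object's deletion state gets re-evaluated.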
        public void rwUnlock() {
            int hc = _lock.writeLock().getHoldCount();

            _lock.writeLock().unlock();

            // FIXME: this relies on the transaction running
            if (hc == 2) {
                updateDeletionState();
            }
        }

        @Override
        public void drop() {
            if (_lock.writeLock().getHoldCount() < 2) {
                throw new IllegalStateException("Expected object to be locked and in transaction");
            }
            _lock.writeLock().unlock();
            jObjectTxManager.drop(this);
        }

        public boolean haveRwLock() {
            return _lock.isWriteLockedByCurrentThread();
        }

        @Override
        public void assertRwLock() {
            if (!haveRwLock())
                throw new IllegalStateException("Expected to be write-locked here: " + getMeta().getName() + " " + Thread.currentThread().getName());
        }

        @Override
        public <R> R runReadLocked(ResolutionStrategy resolutionStrategy, ObjectFnRead<T, R> fn) {
            tryResolve(resolutionStrategy);

            rLock();
            try {
                return fn.apply(_metaPart, _dataPart.get());
            } finally {
                rUnlock();
            }
        }

        protected boolean isResolved() {
            return _dataPart.get() != null;
        }

        @Override
        public <R> R runWriteLocked(ResolutionStrategy resolutionStrategy, ObjectFnWrite<T, R> fn) {
            rwLock();
            try {
                tryResolve(resolutionStrategy);
                VoidFn invalidateFn = () -> {
                    tryLocalResolve();
                    backupRefs();
                    _dataPart.set(null);
                    removeLocal(_metaPart.getName());
                };
                return fn.apply(_metaPart, _dataPart.get(), this::bumpVer, invalidateFn);
            } finally {
                rwUnlock();
            }
        }

        @Override
        public void mutate(JMutator<? super T> mutator) {
            assertRwLock();

            if (getData() == null) throw new IllegalStateException("Resolve before mutate!");

            if (mutator.mutate(getData())) {
                bumpVer();
                jObjectTxManager.addMutator(this, mutator);
            }
        }

        public boolean tryResolve(ResolutionStrategy resolutionStrategy) {
            if (resolutionStrategy == ResolutionStrategy.LOCAL_ONLY ||
                    resolutionStrategy == ResolutionStrategy.REMOTE)
                tryLocalResolve();
            if (resolutionStrategy == ResolutionStrategy.REMOTE) tryRemoteResolve();

            return _dataPart.get() != null;
        }

        @Override
        public void doDelete() {
            assertRwLock();
            getMeta().markDeleted();
            _dataPart.set(null);
            _metaPart.setHaveLocalCopy(false);
            _metaPart.setSavedRefs(new HashSet<>());
        }

        public void backupRefs() {
            assertRwLock();
            if (getData() != null) {
                if ((getMeta().getSavedRefs() != null) && (!getMeta().getSavedRefs().isEmpty())) {
                    Log.error("Saved refs not empty for " + getMeta().getName() + ", will clean");
                    getMeta().setSavedRefs(null);
                }
                getMeta().setSavedRefs(new LinkedHashSet<>(getData().extractRefs()));
            }
        }

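        // Reconciles refs saved at invalidation time with refs extracted from the
        // freshly resolved data: stale saved refs are removed from their targets,
        // refs that only appear in the new data are (re)created via getOrPut.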
        public void hydrateRefs() {
            assertRwLock();
            if (getMeta().getSavedRefs() != null) {
                StringBuilder sb = new StringBuilder();
                sb.append("Hydrating refs for ").append(getMeta().getName()).append("\n");
                sb.append("Saved refs: ");
                getMeta().getSavedRefs().forEach(r -> sb.append(r).append(" "));
                sb.append("\nExtracted refs: ");
                var extracted = new LinkedHashSet<>(getData().extractRefs());
                extracted.forEach(r -> sb.append(r).append(" "));
                Log.debug(sb.toString());
                for (var r : getMeta().getSavedRefs()) {
                    if (!extracted.contains(r))
                        get(r).ifPresent(ro -> ro.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
                            m.removeRef(getMeta().getName());
                            return null;
                        }));
                }
                for (var r : extracted) {
                    if (!getMeta().getSavedRefs().contains(r)) {
                        Log.trace("Hydrating ref " + r + " for " + getMeta().getName());
                        getOrPut(r, getData().getRefType(), Optional.of(getMeta().getName()));
                    }
                }
                getMeta().setSavedRefs(null);
            }
        }

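        // Two transitions: a deleted object that regained referrers is undeleted
        // and its refs re-hydrated; an unreferenced, unfrozen object is either
        // quick-deleted (if never seen) or queued for the distributed deletion check.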
        @Override
        public boolean updateDeletionState() {
            assertRwLock();

            if (!getMeta().isDeletionCandidate() && getMeta().isDeleted()) {
                getMeta().undelete();
                Log.debug("Undelete: " + getMeta().getName());

                Stream<String> refs = Stream.empty();

                if (getMeta().getSavedRefs() != null)
                    refs = getMeta().getSavedRefs().stream();
                if (getData() != null)
                    refs = Stream.concat(refs, getData().extractRefs().stream());

                refs.forEach(r -> {
                    Log.trace("Hydrating ref after undelete " + r + " for " + getMeta().getName());
                    getOrPut(r, getData() != null ? getData().getRefType() : JObjectData.class, Optional.of(getMeta().getName()));
                });
            }

            if (getMeta().isDeletionCandidate() && !getMeta().isDeleted()) {
                if (!getMeta().isSeen())
                    tryQuickDelete();
                else
                    jObjectRefProcessor.putDeletionCandidate(getMeta().getName());
                return true;
            }
            return false;
        }

        private void quickDeleteRef(String name) {
            var got = get(name).orElse(null);
            if (got == null) return;
            if (got.tryRwLock()) {
                try {
                    got.getMeta().removeRef(getMeta().getName());
                } finally {
                    got.rwUnlock();
                }
            } else {
                jObjectRefProcessor.putQuickDeletionCandidate(softJObjectFactory.create(JObjectData.class, this), softJObjectFactory.create(JObjectData.class, got));
            }
        }

        private void tryQuickDelete() {
            assertRwLock();
            if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class))
                tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);

            if (Log.isTraceEnabled())
                Log.trace("Quick delete of: " + getMeta().getName());

            Collection<String> extracted = null;
            if (!getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && getData() != null)
                extracted = getData().extractRefs();
            Collection<String> saved = getMeta().getSavedRefs();

            doDelete();

            if (saved != null)
                for (var r : saved) quickDeleteRef(r);
            if (extracted != null)
                for (var r : extracted) quickDeleteRef(r);
        }

        public <T extends JObjectData> T resolveDataLocal() {
            // jObject.assertRwLock();
            // FIXME: No way to assert read lock?
            return (T) dataProtoSerializer.deserialize(objectPersistentStore.readObject(getMeta().getName()));
        }

        public <T extends JObjectData> T resolveDataRemote() {
            var obj = remoteObjectServiceClient.getObject(this);
            invalidationQueueService.pushInvalidationToAll(this);
            return (T) dataProtoSerializer.deserialize(obj);
        }

        // Really more like "onUpdateSize"
        // Also not called from tryResolveRemote/externalResolution because
        // there it's handled by the notifyWrite
        public void onResolution() {
            jObjectLRU.updateSize(this);
        }

        public void removeLocal(String name) {
            assertRwLock();
            try {
                Log.debug("Invalidating " + name);
                getMeta().setHaveLocalCopy(false);
            } catch (StatusRuntimeException sx) {
                if (sx.getStatus().getCode() != Status.Code.NOT_FOUND)
                    Log.info("Couldn't delete object from persistent store: ", sx);
            } catch (Exception e) {
                Log.info("Couldn't delete object from persistent store: ", e);
            }
        }

        @Override
        public void bumpVer() {
            assertRwLock();
            getMeta().bumpVersion(persistentPeerDataService.getSelfUuid());
        }

        @Override
        public void commitFence() {
            if (haveRwLock())
                throw new IllegalStateException("Waiting on object flush inside transaction?");
            if (getMeta().getLastModifiedTx() == -1) return;
            txWriteback.fence(getMeta().getLastModifiedTx());
        }

        @Override
        public void commitFenceAsync(VoidFn callback) {
            if (haveRwLock())
                throw new IllegalStateException("Waiting on object flush inside transaction?");
            if (getMeta().getLastModifiedTx() == -1) {
                callback.apply();
                return;
            }
            txWriteback.asyncFence(getMeta().getLastModifiedTx(), callback);
        }

        @Override
        public int estimateSize() {
            if (_dataPart.get() == null) return 1024; // Assume metadata etc takes up something
            else return _dataPart.get().estimateSize() + 1024;
        }
    }
}
@@ -1,282 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor;
import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

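// Background reference-counting garbage collector: consumes deletion candidates
// queued by updateDeletionState and deletes objects once neither local nor
// remote referrers remain.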
@ApplicationScoped
public class JObjectRefProcessor {
    private final HashSetDelayedBlockingQueue<Pair<SoftJObject<?>, SoftJObject<?>>> _quickCandidates = new HashSetDelayedBlockingQueue<>(0);
    private final HashSetDelayedBlockingQueue<String> _candidates;
    private final HashSetDelayedBlockingQueue<String> _canDeleteRetries;
    private final HashSet<String> _movablesInProcessing = new HashSet<>();
    @Inject
    JObjectManager jObjectManager;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;
    @Inject
    AutoSyncProcessor autoSyncProcessor;
    @Inject
    JObjectTxManager jObjectTxManager;
    @ConfigProperty(name = "dhfs.objects.move-processor.threads")
    int moveProcessorThreads;
    @ConfigProperty(name = "dhfs.objects.ref-processor.threads")
    int refProcessorThreads;
    @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay")
    long canDeleteRetryDelay;
    @Inject
    ExecutorService executorService;

    private ExecutorService _movableProcessorExecutorService;
    private ExecutorService _refProcessorExecutorService;

    public JObjectRefProcessor(@ConfigProperty(name = "dhfs.objects.deletion.delay") long deletionDelay,
                               @ConfigProperty(name = "dhfs.objects.deletion.can-delete-retry-delay") long canDeleteRetryDelay) {
        _candidates = new HashSetDelayedBlockingQueue<>(deletionDelay);
        _canDeleteRetries = new HashSetDelayedBlockingQueue<>(canDeleteRetryDelay);
    }

    void init(@Observes @Priority(200) StartupEvent event) throws IOException {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("move-proc-%d")
                .build();
        _movableProcessorExecutorService = Executors.newFixedThreadPool(moveProcessorThreads, factory);

        BasicThreadFactory factoryRef = new BasicThreadFactory.Builder()
                .namingPattern("ref-proc-%d")
                .build();
        _refProcessorExecutorService = Executors.newFixedThreadPool(refProcessorThreads, factoryRef);
        for (int i = 0; i < refProcessorThreads; i++) {
            _refProcessorExecutorService.submit(this::refProcessor);
        }

        // Continue GC from last shutdown
        //FIXME
        // executorService.submit(() ->
        //         jObjectManager.findAll().forEach(n -> {
        //             jObjectManager.get(n).ifPresent(o -> o.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
        //                 return null;
        //             }));
        //         }));
    }

    void shutdown(@Observes @Priority(800) ShutdownEvent event) throws InterruptedException {
        _refProcessorExecutorService.shutdownNow();
        if (!_refProcessorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
            Log.error("Refcounting threads didn't exit in 30 seconds");
        }
    }

    public void putQuickDeletionCandidate(SoftJObject<?> from, SoftJObject<?> obj) {
        _quickCandidates.add(Pair.of(from, obj));
    }

    public void putDeletionCandidate(String name) {
        synchronized (_movablesInProcessing) {
            if (_movablesInProcessing.contains(name)) return;
            if (_candidates.add(name))
                Log.debug("Deletion candidate: " + name);
        }
    }

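    // "Movable" (replicated) objects may only be deleted once every known peer
    // confirms it has no referrers: query the peers that haven't confirmed yet,
    // record their confirmations, and retry later if any peer still objects.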
    private void asyncProcessMovable(String objName) {
        synchronized (_movablesInProcessing) {
            if (_movablesInProcessing.contains(objName)) return;
            _movablesInProcessing.add(objName);
        }

        _movableProcessorExecutorService.submit(() -> {
            var obj = jObjectManager.get(objName).orElse(null);
            if (obj == null || obj.getMeta().isDeleted()) return;
            boolean delay = false;
            try {
                var knownHosts = persistentPeerDataService.getHostUuids();
                List<UUID> missing = new ArrayList<>();

                var ourReferrers = obj.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
                    for (var x : knownHosts)
                        if (!m.getConfirmedDeletes().contains(x)) missing.add(x);
                    return m.getReferrers();
                });
                var ret = remoteObjectServiceClient.canDelete(missing, obj.getMeta().getName(), ourReferrers);

                long ok = 0;

                for (var r : ret) {
                    if (!r.getDeletionCandidate())
                        for (var rr : r.getReferrersList())
                            autoSyncProcessor.add(rr);
                    else
                        ok++;
                }

                if (ok != missing.size()) {
                    Log.debug("Delaying deletion check of " + obj.getMeta().getName());
                    delay = true;
                }

                jObjectTxManager.executeTx(() -> {
                    obj.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, i) -> {
                        for (var r : ret)
                            if (r.getDeletionCandidate())
                                m.getConfirmedDeletes().add(UUID.fromString(r.getSelfUuid()));
                        return null;
                    });
                });
            } catch (Exception e) {
                Log.warn("Error when processing deletion of movable object " + obj.getMeta().getName(), e);
            } finally {
                synchronized (_movablesInProcessing) {
                    _movablesInProcessing.remove(obj.getMeta().getName());
                    if (!delay)
                        _candidates.add(obj.getMeta().getName());
                    else
                        _canDeleteRetries.add(obj.getMeta().getName());
                }
            }
        });
    }

    private boolean processMovable(JObject<?> obj) {
        obj.assertRwLock();
        var knownHosts = persistentPeerDataService.getHostUuids();
        boolean missing = false;
        for (var x : knownHosts)
            if (!obj.getMeta().getConfirmedDeletes().contains(x)) {
                missing = true;
                break;
            }

        if (!missing) return true;
        asyncProcessMovable(obj.getMeta().getName());
        return false;
    }

    private void deleteRef(JObject<?> self, String name) {
        jObjectManager.get(name).ifPresent(ref -> ref.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mc, dc, bc, ic) -> {
            mc.removeRef(self.getMeta().getName());
            return null;
        }));
    }

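    // Worker loop: quick-deletion candidates (deleted referrer -> referee pairs)
    // take priority over can-delete retries, which take priority over the
    // regular delayed candidates.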
    private void refProcessor() {
        try {
            while (!Thread.interrupted()) {
                String next = null;
                Pair<SoftJObject<?>, SoftJObject<?>> nextQuick = null;

                while (next == null && nextQuick == null) {
                    nextQuick = _quickCandidates.tryGet();

                    if (nextQuick != null) break;

                    next = _canDeleteRetries.tryGet();
                    if (next == null)
                        next = _candidates.tryGet();
                    if (next == null)
                        nextQuick = _quickCandidates.get(canDeleteRetryDelay);
                }

                JObject<?> target;

                if (nextQuick != null) {
                    var fromSoft = nextQuick.getLeft();
                    var toSoft = nextQuick.getRight();

                    var from = nextQuick.getLeft().get();
                    var to = nextQuick.getRight().get();

                    if (from != null && !from.getMeta().isDeleted()) {
                        Log.warn("Quick delete failed for " + from.getMeta().getName() + " -> " + toSoft.getName());
                        continue;
                    }

                    if (to == null) {
                        Log.warn("Quick delete object missing: " + toSoft.getName());
                        continue;
                    }

                    target = to;

                    jObjectTxManager.executeTx(() -> {
                        if (from != null)
                            from.rwLock();
                        try {
                            try {
                                to.rwLock();
                                to.getMeta().removeRef(fromSoft.getName());
                            } finally {
                                to.rwUnlock();
                            }
                        } finally {
                            if (from != null)
                                from.rwUnlock();
                        }
                    });
                } else {
                    target = jObjectManager.get(next).orElse(null);
                }

                if (target == null) continue;
                try {
                    jObjectTxManager.executeTx(() -> {
                        target.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, v, i) -> {
                            if (m.isFrozen()) return null;
                            if (m.isDeleted()) return null;
                            if (!m.isDeletionCandidate()) return null;
                            if (m.isSeen() && !m.isOnlyLocal()) {
                                if (!processMovable(target))
                                    return null;
                            }
                            if (m.isSeen() && m.isOnlyLocal())
                                Log.warn("Seen only-local object: " + m.getName());

                            if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class))
                                target.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);

                            Log.debug("Deleting " + m.getName());

                            Collection<String> extracted = null;
                            if (!target.getMeta().getKnownClass().isAnnotationPresent(Leaf.class) && target.getData() != null)
                                extracted = target.getData().extractRefs();
                            Collection<String> saved = target.getMeta().getSavedRefs();

                            target.doDelete();

                            if (saved != null)
                                for (var r : saved) deleteRef(target, r);
                            if (extracted != null)
                                for (var r : extracted) deleteRef(target, r);

                            return null;
                        });
                    });
                } catch (Exception ex) {
                    Log.error("Error when deleting: " + next, ex);
                }
            }
        } catch (InterruptedException ignored) {
        }
        Log.info("JObject Refcounter thread exiting");
    }
}
@@ -1,10 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;

public record JObjectSnapshot(ObjectMetadataP meta,
                              JObjectDataP data,
                              int changelogHash) {
}
@@ -1,397 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.utils.VoidFn;
import io.quarkus.logging.Log;
import jakarta.annotation.Nullable;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.Supplier;

@ApplicationScoped
public class JObjectTxManager {
    private final ThreadLocal<TxState> _state = new ThreadLocal<>();
    private final ExecutorService _serializerThreads;
    private final AtomicLong _transientTxId = new AtomicLong();
    @Inject
    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
    @Inject
    ProtoSerializer<ObjectMetadataP, ObjectMetadata> metaProtoSerializer;
    @Inject
    JObjectLRU jObjectLRU;
    @Inject
    JObjectManager jObjectManager;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @Inject
    TxWriteback txWriteback;
    @ConfigProperty(name = "dhfs.objects.ref_verification")
    boolean refVerification;

    public JObjectTxManager() {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("tx-serializer-%d")
                .build();

        _serializerThreads = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory);
    }

    public void begin() {
        if (_state.get() != null)
            throw new IllegalStateException("Transaction already running");

        _state.set(new TxState());
    }

    public void drop(JObject<?> obj) {
        var state = _state.get();
        if (state == null)
            throw new IllegalStateException("Transaction not running");
        Log.debug("Dropping " + obj.getMeta().getName() + " from " + state._id);
        obj.assertRwLock();
        state._writeObjects.remove(obj);
        obj.rwUnlock();
    }

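    // Commit pipeline: detect which objects actually changed (changelog hash,
    // plus serialized-data comparison when ref verification is on), check that
    // mutators revert and reapply cleanly, then pack everything into a
    // writeback bundle handed to TxWriteback.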
    // Returns Id of bundle to wait for, or -1 if there was nothing written
    public long commit() {
        var state = _state.get();
        if (state == null)
            throw new IllegalStateException("Transaction not running");

        if (state._writeObjects.isEmpty()) {
            // Log.trace("Empty transaction " + state._id);
            state._callbacks.forEach(c -> c.accept(null));
            _state.remove();
            return -1;
        }

        Log.debug("Committing transaction " + state._id);

        for (var obj : state._writeObjects.entrySet()) {
            Log.debug("Committing " + obj.getKey().getMeta().getName() + " deleted=" + obj.getKey().getMeta().isDeleted() + " deletion-candidate=" + obj.getKey().getMeta().isDeletionCandidate());

            var dataDiff = obj.getKey().getMeta().changelogHash() != obj.getValue().snapshot.changelogHash()
                    || obj.getValue()._forceInvalidated;

            if (refVerification) {
                // Null check in case of object not being resolved before (though then we can't check this)
                boolean dataDiffReal = obj.getValue().snapshot.data() != null
                        && !Objects.equals(obj.getValue().snapshot.data(), obj.getKey().getData() == null ? null : dataProtoSerializer.serialize(obj.getKey().getData()));

                if (dataDiffReal && !dataDiff) {
                    var msg = "Data diff not equal for " + obj.getKey().getMeta().getName() + " " + obj.getKey().getData() + " before = " + ((obj.getValue().snapshot != null) ? obj.getValue().snapshot.data() : null) + " after = " + ((obj.getKey().getData() != null) ? dataProtoSerializer.serialize(obj.getKey().getData()) : null);
                    throw new IllegalStateException(msg);
                }
                if (dataDiff && !dataDiffReal)
                    Log.warn("Useless update for " + obj.getKey().getMeta().getName());
            }

            // if (obj.getValue()._copy && !obj.getValue()._mutators.isEmpty())
            //     throw new IllegalStateException("Object copied but had mutators!");

            if (refVerification && !obj.getValue()._copy) {
                var cur = dataProtoSerializer.serialize(obj.getKey().getData());
                for (var mut : obj.getValue()._mutators.reversed())
                    revertMutator(obj.getKey(), mut);
                var rev = dataProtoSerializer.serialize(obj.getKey().getData());

                if (obj.getValue().snapshot.data() != null && !Objects.equals(rev, obj.getValue().snapshot.data()))
                    throw new IllegalStateException("Mutator could not be reverted for object " + obj.getKey().getMeta().getName() + "\n old = " + obj.getValue().snapshot.data() + "\n reverted = " + rev + "\n");

                for (var mut : obj.getValue()._mutators)
                    applyMutator(obj.getKey(), mut);

                var cur2 = dataProtoSerializer.serialize(obj.getKey().getData());
                if (!Objects.equals(cur, cur2))
                    throw new IllegalStateException("Mutator could not be reapplied for object " + obj.getKey().getMeta().getName() + "\n old = " + cur + "\n reapplied = " + cur2 + "\n");
            }

            obj.getValue()._metaSerialized = metaProtoSerializer.serialize(obj.getKey().getMeta());
            obj.getValue()._metaChanged = !Objects.equals(obj.getValue().snapshot.meta(), obj.getValue()._metaSerialized);
            obj.getValue()._dataChanged = dataDiff;

            notifyWrite(obj.getKey(), obj.getValue()._metaChanged, dataDiff);

            if (refVerification) {
                var oldRefs = obj.getValue().snapshot.data() == null
                        ? null
                        : ((JObjectData) dataProtoSerializer.deserialize(obj.getValue().snapshot.data())).extractRefs();
                verifyRefs(obj.getKey(), oldRefs);
            }
        }

        var bundle = txWriteback.createBundle();

        try {
            for (var e : state._writeObjects.entrySet()) {
                var key = e.getKey();
                var value = e.getValue();
                if (key.getMeta().isDeleted()) {
                    bundle.delete(key);
                    continue;
                }

                if (!value._dataChanged && !value._metaChanged) {
                    continue;
                }

                if (key.getMeta().isHaveLocalCopy() && value._dataChanged) {
                    bundle.commit(key,
                            value._metaSerialized,
                            dataProtoSerializer.serialize(key.getData())
                    );
                } else if (key.getMeta().isHaveLocalCopy() && !value._dataChanged) {
                    bundle.commitMetaChange(key, value._metaSerialized);
                } else if (!key.getMeta().isHaveLocalCopy()) {
                    bundle.commit(key, value._metaSerialized, null);
                } else {
                    throw new IllegalStateException("Unexpected object flush combination");
                }
            }
        } catch (Exception ex) {
            Log.error("Error creating tx bundle ", ex);
            txWriteback.dropBundle(bundle);
            throw ex;
        }

        for (var e : state._writeObjects.entrySet())
            e.getKey().getMeta().setLastModifiedTx(bundle.getId());

        state._writeObjects.forEach((key, value) -> key.rwUnlock());

        state._callbacks.forEach(s -> txWriteback.asyncFence(bundle.getId(), () -> s.accept(null)));

        txWriteback.commitBundle(bundle);

        _state.remove();

        return bundle.getId();
    }

    private <T extends JObjectData> void notifyWrite(JObject<?> obj, boolean metaChanged, boolean hasDataChanged) {
        jObjectLRU.updateSize(obj);
        jObjectManager.runWriteListeners(obj, metaChanged, hasDataChanged);
        if (hasDataChanged && obj.getMeta().isHaveLocalCopy()) {
            invalidationQueueService.pushInvalidationToAll(obj);
        }
    }

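    // Reference-graph sanity check: every ref extracted from the new data must
    // point at a live object that lists this object as a referrer, and refs
    // dropped since the snapshot must no longer be registered on their targets.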
    private void verifyRefs(JObject<?> obj, @Nullable Collection<String> oldRefs) {
        if (!refVerification) return;

        if (obj.getData() == null) return;
        if (obj.getMeta().isDeleted()) return;
        var newRefs = obj.getData().extractRefs();
        if (oldRefs != null)
            for (var o : oldRefs)
                if (!newRefs.contains(o)) {
                    jObjectManager.get(o).ifPresent(refObj -> {
                        if (refObj.getMeta().checkRef(obj.getMeta().getName()))
                            throw new IllegalStateException("Object " + o + " is referenced from " + obj.getMeta().getName() + " but shouldn't be");
                    });
                }
        for (var r : newRefs) {
            var refObj = jObjectManager.get(r).orElseThrow(() -> new IllegalStateException("Object " + r + " not found but should be referenced from " + obj.getMeta().getName()));
            if (refObj.getMeta().isDeleted())
                throw new IllegalStateException("Object " + r + " deleted but referenced from " + obj.getMeta().getName());
            if (!refObj.getMeta().checkRef(obj.getMeta().getName()))
                throw new IllegalStateException("Object " + r + " is not referenced by " + obj.getMeta().getName() + " but should be");
        }
    }

    private <T extends JObjectData> void applyMutator(JObject<?> obj, JMutator<T> mutator) {
        mutator.mutate((T) obj.getData());
    }

    private <T extends JObjectData> void revertMutator(JObject<?> obj, JMutator<T> mutator) {
        mutator.revert((T) obj.getData());
    }

    public void rollback(String message) {
        var state = _state.get();
        if (state == null)
            throw new IllegalStateException("Transaction not running");
        Log.debug("Rollback of " + state._id);

        for (var obj : state._writeObjects.entrySet()) {
            Log.debug("Rollback of " + obj.getKey().getMeta().getName());
            try {
                if (obj.getValue()._copy) {
                    obj.getKey().rollback(
                            metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()),
                            obj.getValue().snapshot.data() != null ? dataProtoSerializer.deserialize(obj.getValue().snapshot.data()) : null);
                } else {
                    for (var mut : obj.getValue()._mutators.reversed())
                        revertMutator(obj.getKey(), mut);
                    obj.getKey().rollback(metaProtoSerializer.deserialize(obj.getValue().snapshot.meta()), obj.getKey().getData());
                }
                obj.getKey().updateDeletionState();
            } finally {
                obj.getKey().rwUnlock();
            }
        }

        state._callbacks.forEach(c -> c.accept(message != null ? message : "Unknown error"));
        Log.debug("Rollback of " + state._id + " done");
        _state.remove();
    }

    public void executeTxAndFlushAsync(VoidFn fn, Consumer<String> callback) {
        var state = _state.get();
        if (state != null) {
            _state.get()._callbacks.add(callback);
            fn.apply();
            return;
        }

        begin();
        try {
            _state.get()._callbacks.add(callback);
            fn.apply();
            commit();
        } catch (Exception e) {
            Log.debug("Error in transaction " + _state.get()._id, e);
            rollback(e.getMessage());
            throw e;
        }
    }

    public void executeTxAndFlush(VoidFn fn) {
        executeTxAndFlush(() -> {
            fn.apply();
            return null;
        });
    }

    public <T> T executeTxAndFlush(Supplier<T> fn) {
        if (_state.get() != null) {
            throw new IllegalStateException("Can't wait for transaction to flush from non-top-level tx");
        }

        begin();
        try {
            var ret = fn.get();
            var bundleId = commit();
            if (bundleId != -1)
                txWriteback.fence(bundleId);
            return ret;
        } catch (Exception e) {
            Log.debug("Error in transaction " + _state.get()._id, e);
            rollback(e.getMessage());
            throw e;
        }
    }

    public void executeTx(VoidFn fn) {
        executeTx(() -> {
            fn.apply();
            return null;
        });
    }

    public <T> T executeTx(Supplier<T> fn) {
        if (_state.get() != null) {
            return fn.get();
        }

        begin();
        try {
            var ret = fn.get();
            commit();
            return ret;
        } catch (Exception e) {
            Log.debug("Error in transaction " + _state.get()._id, e);
            rollback(e.getMessage());
            throw e;
        }
    }

    public void forceInvalidate(JObject<?> obj) {
        var state = _state.get();

        if (state == null)
            throw new IllegalStateException("Transaction not running");

        obj.assertRwLock();

        var got = state._writeObjects.get(obj);
        if (got != null)
            got._forceInvalidated = true;
    }

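    // Objects enter a transaction in one of two modes: with a full serialized
    // snapshot (copy), restored wholesale on rollback, or meta-only, where
    // rollback replays the recorded mutators in reverse.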
    void addToTx(JObject<?> obj, boolean copy) {
        var state = _state.get();

        if (state == null)
            throw new IllegalStateException("Transaction not running");

        Log.debug("Adding " + obj.getMeta().getName() + " to transaction " + state._id);

        obj.assertRwLock();
        obj.rwLock();

        var snapshot = copy
                ? new JObjectSnapshot(
                metaProtoSerializer.serialize(obj.getMeta()),
                (obj.getData() == null) ? null : dataProtoSerializer.serialize(obj.getData()),
                obj.getMeta().changelogHash())
                : new JObjectSnapshot(
                metaProtoSerializer.serialize(obj.getMeta()), (!refVerification || (obj.getData() == null)) ? null : dataProtoSerializer.serialize(obj.getData()),
                obj.getMeta().changelogHash());

        state._writeObjects.put(obj, new TxState.TxObjectState(snapshot, copy));
    }

    <T extends JObjectData> void addMutator(JObject<T> obj, JMutator<? super T> mut) {
        var state = _state.get();

        if (state == null)
            throw new IllegalStateException("Transaction not running");

        obj.assertRwLock();

        //TODO: Asserts for rwLock/rwLockNoCopy?

        var got = state._writeObjects.get(obj);
        if (got == null)
            throw new IllegalStateException("Object not in transaction");
        if (got._copy) {
            Log.trace("Ignoring mutator for copied object: " + obj.getMeta().getName());
            return;
        }
        got._mutators.addLast(mut);
    }

    private class TxState {
        private final long _id = _transientTxId.incrementAndGet();
        private final HashMap<JObject<?>, TxObjectState> _writeObjects = new HashMap<>();
        private final ArrayList<Consumer<String>> _callbacks = new ArrayList<>();

        private static class TxObjectState {
            final JObjectSnapshot snapshot;
            final List<JMutator<?>> _mutators = new LinkedList<>();
            final boolean _copy;
            boolean _forceInvalidated = false;
            ObjectMetadataP _metaSerialized; // Filled in when committing
            boolean _metaChanged = false; // Filled in when committing
            boolean _dataChanged = false; // Filled in when committing

            private TxObjectState(JObjectSnapshot snapshot, boolean copy) {
                this.snapshot = snapshot;
                _copy = copy;
            }
        }
    }
}
@@ -1,12 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Indicates the object never has references
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface Leaf {
}
@@ -1,196 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.repository.ObjectChangelog;
import com.usatiuk.dhfs.objects.repository.ObjectChangelogEntry;
import com.usatiuk.dhfs.objects.repository.ObjectHeader;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import lombok.Getter;
import lombok.Setter;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serial;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;

public class ObjectMetadata {
    @Getter
    private final String _name;
    @Getter
    private final Map<UUID, Long> _remoteCopies = new LinkedHashMap<>();
    private final AtomicReference<Class<? extends JObjectData>> _knownClass = new AtomicReference<>();
    @Getter
    private final HashSet<UUID> _confirmedDeletes = new LinkedHashSet<>();
    private final Set<String> _referrers = new LinkedHashSet<>();
    @Getter
    private volatile boolean _seen = false;
    @Getter
    private volatile boolean _deleted = false;
    @Getter
    @Setter
    private Map<UUID, Long> _changelog = new LinkedHashMap<>(4);
    @Getter
    @Setter
    private Set<String> _savedRefs = Collections.emptySet();
    @Getter
    private boolean _frozen = false;
    @Getter
    @Setter
    private volatile boolean _haveLocalCopy = false;
    @Getter
    private transient volatile boolean _written = true;
    @Getter
    @Setter
    private long _lastModifiedTx = -1; // -1 if it's already on disk

    public ObjectMetadata(String name, boolean written, Class<? extends JObjectData> knownClass) {
        _name = name;
        _written = written;
        _knownClass.set(knownClass);
    }

    public Class<? extends JObjectData> getKnownClass() {
        return _knownClass.get();
    }

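    // The known class can only become more specific: narrowing to a subclass is
    // done with a CAS loop, widening requests are ignored, and classes outside
    // the hierarchy are treated as data loss.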
    protected void narrowClass(Class<? extends JObjectData> klass) {
        Class<? extends JObjectData> got = null;
        do {
            got = _knownClass.get();
            if (got.equals(klass)) return;
            if (klass.isAssignableFrom(got)) return;
            if (!got.isAssignableFrom(klass))
                throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Could not narrow class of object " + getName() + " from " + got + " to " + klass));
        } while (!_knownClass.compareAndSet(got, klass));
    }

    @Serial
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        _written = true;
    }

    public void markSeen() {
        Log.trace("Marking seen: " + getName());
        _seen = true;
    }

    public void markDeleted() {
        _deleted = true;
    }

    public void undelete() {
        _confirmedDeletes.clear();
        _deleted = false;
    }

    public void markWritten() {
        _written = true;
    }

    // FIXME:? a better way?
    public void markUnWritten() {
        _written = false;
    }

    public boolean isReferred() {
        return !_referrers.isEmpty();
    }

    public void freeze() {
        if (_frozen) throw new IllegalArgumentException("Already frozen");
        _confirmedDeletes.clear();
        Log.trace("Freezing " + getName());
        _frozen = true;
    }

    public void unfreeze() {
        if (!_frozen) throw new IllegalArgumentException("Already unfrozen");
        Log.trace("Unfreezing " + getName());
        _frozen = false;
    }

    public boolean checkRef(String from) {
        return _referrers.contains(from);
    }

    public void addRef(String from) {
        if (from.equals(getName()))
            throw new IllegalArgumentException("Trying to make object refer to itself: " + getName());
        _confirmedDeletes.clear();
        _referrers.add(from);
        if (Log.isTraceEnabled())
            Log.trace("Adding ref " + from + " to " + getName());
    }

    public void removeRef(String from) {
        if (Log.isTraceEnabled())
            Log.trace("Removing ref " + from + " from " + getName());
        _referrers.remove(from);
    }

    public Collection<String> getReferrers() {
        return _referrers.stream().toList();
    }

    public Collection<String> getReferrersMutable() {
        return _referrers;
    }

    public boolean isDeletionCandidate() {
        return !isFrozen() && !isReferred();
    }

    public Long getOurVersion() {
        return _changelog.values().stream().reduce(0L, Long::sum);
    }

    public Long getBestVersion() {
        if (_remoteCopies.isEmpty()) return getOurVersion();
        return Math.max(getOurVersion(), _remoteCopies.values().stream().max(Long::compareTo).get());
    }

    public void bumpVersion(UUID selfUuid) {
        _changelog.merge(selfUuid, 1L, Long::sum);
    }

    public ObjectChangelog toRpcChangelog() {
        var changelogBuilder = ObjectChangelog.newBuilder();

        for (var h : _changelog.entrySet()) {
            if (h.getValue() == 0) continue;
            var logEntry = ObjectChangelogEntry.newBuilder();
            logEntry.setHost(h.getKey().toString());
            logEntry.setVersion(h.getValue());
            changelogBuilder.addEntries(logEntry.build());
        }
        return changelogBuilder.build();
    }

    public ObjectHeader toRpcHeader() {
        return toRpcHeader(null);
    }

    public ObjectHeader toRpcHeader(JObjectDataP data) {
        var headerBuilder = ObjectHeader.newBuilder().setName(getName());
        headerBuilder.setChangelog(toRpcChangelog());

        if (data != null)
            headerBuilder.setPushedData(data);

        return headerBuilder.build();
    }

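    // The hash covers both the changelog and the have-local-copy flag, so
    // gaining or losing the local copy counts as a data change even when the
    // changelog itself is untouched.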
    public int changelogHash() {
        int res = Objects.hashCode(_changelog);
        res = 31 * res + Objects.hashCode(_haveLocalCopy);
        return res;
    }

    public boolean isOnlyLocal() {
        return getKnownClass().isAnnotationPresent(OnlyLocal.class);
    }
}
@@ -1,60 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import jakarta.inject.Singleton;

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

@Singleton
public class ObjectMetadataSerializer implements ProtoSerializer<ObjectMetadataP, ObjectMetadata> {
    @Override
    public ObjectMetadataP serialize(ObjectMetadata object) {
        return ObjectMetadataP.newBuilder()
                .setName(object.getName())
                .putAllRemoteCopies(object.getRemoteCopies().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)))
                .setKnownClass(object.getKnownClass().getName())
                .setSeen(object.isSeen())
                .setDeleted(object.isDeleted())
                .addAllConfirmedDeletes(() -> object.getConfirmedDeletes().stream().map(e -> e.toString()).iterator())
                .addAllReferrers(object.getReferrers())
                .putAllChangelog(object.getChangelog().entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)))
                .addAllSavedRefs(object.getSavedRefs() != null ? object.getSavedRefs() : Collections.emptyList())
                .setFrozen(object.isFrozen())
                .setHaveLocalCopy(object.isHaveLocalCopy())
                .build();
    }

    @Override
    public ObjectMetadata deserialize(ObjectMetadataP message) {
        try {
            var obj = new ObjectMetadata(message.getName(), true,
                    (Class<? extends JObjectData>) Class.forName(message.getKnownClass(), true, ObjectMetadata.class.getClassLoader()));
            if (!JObjectData.class.isAssignableFrom(obj.getKnownClass()))
                throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Class not inherited from JObjectData " + message.getKnownClass()));

            obj.getRemoteCopies().putAll(message.getRemoteCopiesMap().entrySet().stream().collect(Collectors.toMap(e -> UUID.fromString(e.getKey()), Map.Entry::getValue)));
            if (message.getSeen()) obj.markSeen();
            if (message.getDeleted()) obj.markDeleted();
            message.getConfirmedDeletesList().stream().map(UUID::fromString).forEach(o -> obj.getConfirmedDeletes().add(o));
            obj.getReferrersMutable().addAll(message.getReferrersList());
            obj.getChangelog().putAll(message.getChangelogMap().entrySet().stream().collect(Collectors.toMap(e -> UUID.fromString(e.getKey()), Map.Entry::getValue)));
            if (message.getSavedRefsCount() > 0)
                obj.setSavedRefs(new LinkedHashSet<>(message.getSavedRefsList()));
            if (message.getFrozen())
                obj.freeze();
            if (message.getHaveLocalCopy())
                obj.setHaveLocalCopy(true);

            return obj;
        } catch (ClassNotFoundException cx) {
            throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("Could not find class " + message.getKnownClass()));
        }
    }
}
@@ -1,11 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface OnlyLocal {
}
@@ -1,11 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface PushResolution {
}
@@ -1,7 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

public interface SoftJObject<T extends JObjectData> {
    JObject<? extends T> get();

    String getName();
}
@@ -1,77 +0,0 @@
package com.usatiuk.dhfs.objects.jrepository;

import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import lombok.NonNull;

import java.lang.ref.SoftReference;
import java.util.concurrent.atomic.AtomicReference;

@ApplicationScoped
public class SoftJObjectFactory {
    @Inject
    JObjectManager jObjectManager;

    public <T extends JObjectData> SoftJObject<T> create(Class<T> klass, String name) {
        return new SoftJObjectImpl<>(klass, name);
    }

    public <T extends JObjectData> SoftJObject<T> create(Class<T> klass, JObject<? extends T> obj) {
        return new SoftJObjectImpl<>(klass, obj);
    }

    private class SoftJObjectImpl<T extends JObjectData> implements SoftJObject<T> {
        private final Class<T> _klass;
        private final String _objName;
        private final AtomicReference<SoftReference<? extends JObject<? extends T>>> _obj;

        private SoftJObjectImpl(Class<T> klass, @NonNull String objName) {
            _klass = klass;
            _objName = objName;
            _obj = new AtomicReference<>();
        }

        private SoftJObjectImpl(Class<T> klass, JObject<? extends T> obj) {
            _klass = klass;
            _objName = obj.getMeta().getName();
            _obj = new AtomicReference<>(new SoftReference<>(obj));
        }

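        // If the soft reference was cleared, re-resolve the object by name
        // through the manager and re-cache it with a CAS, retrying on races.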
@Override
|
||||
public JObject<? extends T> get() {
|
||||
while (true) {
|
||||
var have = _obj.get();
|
||||
if (have != null) {
|
||||
var ref = have.get();
|
||||
if (ref != null)
|
||||
return ref;
|
||||
}
|
||||
var got = jObjectManager.get(_objName).orElse(null);
|
||||
if (got == null) return null;
|
||||
var checked = got.as(_klass);
|
||||
var next = new SoftReference<>(checked);
|
||||
if (_obj.compareAndSet(have, next))
|
||||
return checked;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return _objName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
SoftJObjectImpl<?> that = (SoftJObjectImpl<?>) o;
|
||||
return _objName.equals(that._objName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return _objName.hashCode();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package com.usatiuk.dhfs.objects.jrepository;
|
||||
|
||||
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
|
||||
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
|
||||
|
||||
public interface TxBundle {
|
||||
long getId();
|
||||
|
||||
void commit(JObject<?> obj, ObjectMetadataP meta, JObjectDataP data);
|
||||
|
||||
void commitMetaChange(JObject<?> obj, ObjectMetadataP meta);
|
||||
|
||||
void delete(JObject<?> obj);
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package com.usatiuk.dhfs.objects.jrepository;
|
||||
|
||||
import com.usatiuk.dhfs.utils.VoidFn;
|
||||
|
||||
public interface TxWriteback {
|
||||
TxBundle createBundle();
|
||||
|
||||
void commitBundle(TxBundle bundle);
|
||||
|
||||
void dropBundle(TxBundle bundle);
|
||||
|
||||
void fence(long bundleId);
|
||||
|
||||
// Executes callback after bundle with bundleId id has been persisted
|
||||
// if it was already, runs callback on the caller thread
|
||||
void asyncFence(long bundleId, VoidFn callback);
|
||||
}
|
||||
@@ -1,417 +0,0 @@
|
||||
package com.usatiuk.dhfs.objects.jrepository;
|
||||
|
||||
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
|
||||
import com.usatiuk.dhfs.objects.persistence.ObjectMetadataP;
|
||||
import com.usatiuk.dhfs.objects.repository.persistence.ObjectPersistentStore;
|
||||
import com.usatiuk.dhfs.utils.VoidFn;
|
||||
import io.quarkus.logging.Log;
|
||||
import io.quarkus.runtime.ShutdownEvent;
|
||||
import io.quarkus.runtime.StartupEvent;
|
||||
import jakarta.annotation.Priority;
|
||||
import jakarta.enterprise.context.ApplicationScoped;
|
||||
import jakarta.enterprise.event.Observes;
|
||||
import jakarta.inject.Inject;
|
||||
import lombok.Getter;
|
||||
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
|
||||
import org.eclipse.microprofile.config.inject.ConfigProperty;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentLinkedQueue;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@ApplicationScoped
public class TxWritebackImpl implements TxWriteback {
    private final LinkedList<TxBundle> _pendingBundles = new LinkedList<>();
    private final LinkedHashMap<Long, TxBundle> _notFlushedBundles = new LinkedHashMap<>();

    private final Object _flushWaitSynchronizer = new Object();
    private final AtomicLong _lastWrittenTx = new AtomicLong(-1);
    private final AtomicLong _counter = new AtomicLong();
    private final AtomicLong _waitedTotal = new AtomicLong(0);
    @Inject
    ObjectPersistentStore objectPersistentStore;
    @ConfigProperty(name = "dhfs.objects.writeback.limit")
    long sizeLimit;
    private long currentSize = 0;
    private ExecutorService _writebackExecutor;
    private ExecutorService _commitExecutor;
    private ExecutorService _statusExecutor;
    private volatile boolean _ready = false;

    void init(@Observes @Priority(110) StartupEvent event) {
        {
            BasicThreadFactory factory = new BasicThreadFactory.Builder()
                    .namingPattern("tx-writeback-%d")
                    .build();

            _writebackExecutor = Executors.newSingleThreadExecutor(factory);
            _writebackExecutor.submit(this::writeback);
        }

        {
            BasicThreadFactory factory = new BasicThreadFactory.Builder()
                    .namingPattern("writeback-commit-%d")
                    .build();

            _commitExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), factory);
        }
        _statusExecutor = Executors.newSingleThreadExecutor();
        _statusExecutor.submit(() -> {
            try {
                while (true) {
                    Thread.sleep(1000);
                    if (currentSize > 0)
                        Log.info("Tx commit status: size="
                                + currentSize / 1024 / 1024 + "MB");
                }
            } catch (InterruptedException ignored) {
            }
        });
        _ready = true;
    }

    void shutdown(@Observes @Priority(890) ShutdownEvent event) throws InterruptedException {
        Log.info("Waiting for all transactions to drain");

        synchronized (_flushWaitSynchronizer) {
            _ready = false;
            while (currentSize > 0) {
                _flushWaitSynchronizer.wait();
            }
        }

        _writebackExecutor.shutdownNow();
        Log.info("Total tx bundle wait time: " + _waitedTotal.get() + "ms");
    }

    private void verifyReady() {
        if (!_ready) throw new IllegalStateException("Not doing transactions while shutting down!");
    }

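    // Writer loop: batch all ready pending bundles into one, write objects and
    // metadata in parallel on the commit executor, then commit the transaction
    // manifest and run the fence callbacks of every flushed bundle.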
    private void writeback() {
        while (!Thread.interrupted()) {
            try {
                TxBundle bundle = new TxBundle(0);
                synchronized (_pendingBundles) {
                    while (_pendingBundles.isEmpty() || !_pendingBundles.peek()._ready)
                        _pendingBundles.wait();

                    long diff = 0;
                    while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                        var toCompress = _pendingBundles.poll();
                        diff -= toCompress.calculateTotalSize();
                        bundle.compress(toCompress);
                    }
                    diff += bundle.calculateTotalSize();
                    synchronized (_flushWaitSynchronizer) {
                        currentSize += diff;
                    }
                }

                var latch = new CountDownLatch(bundle._committed.size() + bundle._meta.size());
                ConcurrentLinkedQueue<Throwable> errors = new ConcurrentLinkedQueue<>();

                for (var c : bundle._committed.values()) {
                    _commitExecutor.execute(() -> {
                        try {
                            Log.trace("Writing new " + c.newMeta.getName());
                            objectPersistentStore.writeNewObject(c.newMeta.getName(), c.newMeta, c.newData);
                        } catch (Throwable t) {
                            Log.error("Error writing " + c.newMeta.getName(), t);
                            errors.add(t);
                        } finally {
                            latch.countDown();
                        }
                    });
                }
                for (var c : bundle._meta.values()) {
                    _commitExecutor.execute(() -> {
                        try {
                            Log.trace("Writing (meta) " + c.newMeta.getName());
                            objectPersistentStore.writeNewObjectMeta(c.newMeta.getName(), c.newMeta);
                        } catch (Throwable t) {
                            Log.error("Error writing " + c.newMeta.getName(), t);
                            errors.add(t);
                        } finally {
                            latch.countDown();
                        }
                    });
                }
                if (Log.isDebugEnabled())
                    for (var d : bundle._deleted.keySet())
                        Log.debug("Deleting from persistent storage " + d.getMeta().getName()); // FIXME: For tests

                latch.await();
                if (!errors.isEmpty()) {
                    throw new RuntimeException("Errors in writeback!");
                }
                objectPersistentStore.commitTx(
                        new TxManifest(
                                Stream.concat(bundle._committed.keySet().stream().map(t -> t.getMeta().getName()),
                                        bundle._meta.keySet().stream().map(t -> t.getMeta().getName())).collect(Collectors.toCollection(ArrayList::new)),
                                bundle._deleted.keySet().stream().map(t -> t.getMeta().getName()).collect(Collectors.toCollection(ArrayList::new))
                        ));
                Log.trace("Bundle " + bundle.getId() + " committed");

                List<List<VoidFn>> callbacks = new ArrayList<>();
                synchronized (_notFlushedBundles) {
                    _lastWrittenTx.set(bundle.getId());
                    while (!_notFlushedBundles.isEmpty() && _notFlushedBundles.firstEntry().getKey() <= bundle.getId()) {
                        callbacks.add(_notFlushedBundles.pollFirstEntry().getValue().setCommitted());
                    }
                }
                callbacks.forEach(l -> l.forEach(VoidFn::apply));

                synchronized (_flushWaitSynchronizer) {
                    currentSize -= bundle.calculateTotalSize();
                    // FIXME:
                    if (currentSize <= sizeLimit || !_ready)
                        _flushWaitSynchronizer.notifyAll();
                }
            } catch (InterruptedException ignored) {
            } catch (Exception e) {
                Log.error("Uncaught exception in writeback", e);
            } catch (Throwable o) {
                Log.error("Uncaught THROWABLE in writeback", o);
            }
        }
        Log.info("Writeback thread exiting");
    }

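    // Creates a new bundle, applying backpressure: if the pending size exceeds
    // the configured limit, first try to merge already-ready pending bundles,
    // and otherwise block until the writer thread drains below the limit.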
    @Override
    public com.usatiuk.dhfs.objects.jrepository.TxBundle createBundle() {
        verifyReady();
        boolean wait = false;
        while (true) {
            if (wait) {
                synchronized (_flushWaitSynchronizer) {
                    long started = System.currentTimeMillis();
                    while (currentSize > sizeLimit) {
                        try {
                            _flushWaitSynchronizer.wait();
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }
                    }
                    long waited = System.currentTimeMillis() - started;
                    _waitedTotal.addAndGet(waited);
                    if (Log.isTraceEnabled())
                        Log.trace("Thread " + Thread.currentThread().getName() + " waited for tx bundle for " + waited + " ms");
                    wait = false;
                }
            }
            synchronized (_pendingBundles) {
                synchronized (_flushWaitSynchronizer) {
                    if (currentSize > sizeLimit) {
                        if (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                            var target = _pendingBundles.poll();

                            long diff = -target.calculateTotalSize();
                            while (!_pendingBundles.isEmpty() && _pendingBundles.peek()._ready) {
                                var toCompress = _pendingBundles.poll();
                                diff -= toCompress.calculateTotalSize();
                                target.compress(toCompress);
                            }
                            diff += target.calculateTotalSize();
                            currentSize += diff;
                            _pendingBundles.addFirst(target);
                        }
                    }

                    if (currentSize > sizeLimit) {
                        wait = true;
                        continue;
                    }
                }
                synchronized (_notFlushedBundles) {
                    var bundle = new TxBundle(_counter.incrementAndGet());
                    _pendingBundles.addLast(bundle);
                    _notFlushedBundles.put(bundle.getId(), bundle);
                    return bundle;
                }
            }
        }
    }

    @Override
    public void commitBundle(com.usatiuk.dhfs.objects.jrepository.TxBundle bundle) {
        verifyReady();
        synchronized (_pendingBundles) {
            ((TxBundle) bundle).setReady();
            if (_pendingBundles.peek() == bundle)
                _pendingBundles.notify();
            synchronized (_flushWaitSynchronizer) {
                currentSize += ((TxBundle) bundle).calculateTotalSize();
            }
        }
    }

    @Override
    public void dropBundle(com.usatiuk.dhfs.objects.jrepository.TxBundle bundle) {
        verifyReady();
        synchronized (_pendingBundles) {
            Log.warn("Dropped bundle: " + bundle);
            _pendingBundles.remove((TxBundle) bundle);
            synchronized (_flushWaitSynchronizer) {
                currentSize -= ((TxBundle) bundle).calculateTotalSize();
            }
        }
    }

    @Override
    public void fence(long bundleId) {
        var latch = new CountDownLatch(1);
        asyncFence(bundleId, latch::countDown);
        try {
            latch.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void asyncFence(long bundleId, VoidFn fn) {
        verifyReady();
        if (bundleId <= 0) throw new IllegalArgumentException("txId should be >0!");
        if (_lastWrittenTx.get() >= bundleId) {
            fn.apply();
            return;
        }
        synchronized (_notFlushedBundles) {
            if (_lastWrittenTx.get() >= bundleId) {
                fn.apply();
                return;
            }
            _notFlushedBundles.get(bundleId).addCallback(fn);
        }
    }

    @Getter
    private static class TxManifest implements com.usatiuk.dhfs.objects.repository.persistence.TxManifest {
        private final ArrayList<String> _written;
        private final ArrayList<String> _deleted;

        private TxManifest(ArrayList<String> written, ArrayList<String> deleted) {
            _written = written;
            _deleted = deleted;
        }
    }

    private class TxBundle implements com.usatiuk.dhfs.objects.jrepository.TxBundle {
        private final HashMap<JObject<?>, CommittedEntry> _committed = new HashMap<>();
        private final HashMap<JObject<?>, CommittedMeta> _meta = new HashMap<>();
        private final HashMap<JObject<?>, Integer> _deleted = new HashMap<>();
        private final ArrayList<VoidFn> _callbacks = new ArrayList<>();
        private long _txId;
        @Getter
        private volatile boolean _ready = false;
        private long _size = -1;
        private boolean _wasCommitted = false;

        private TxBundle(long txId) {_txId = txId;}

        @Override
        public long getId() {
            return _txId;
        }

        public void setReady() {
            _ready = true;
        }

        public void addCallback(VoidFn callback) {
            synchronized (_callbacks) {
                if (_wasCommitted) throw new IllegalStateException();
                _callbacks.add(callback);
            }
        }

        public List<VoidFn> setCommitted() {
            synchronized (_callbacks) {
                _wasCommitted = true;
                return Collections.unmodifiableList(_callbacks);
            }
        }

        @Override
        public void commit(JObject<?> obj, ObjectMetadataP meta, JObjectDataP data) {
            synchronized (_committed) {
                _committed.put(obj, new CommittedEntry(meta, data, obj.estimateSize()));
            }
        }

        @Override
        public void commitMetaChange(JObject<?> obj, ObjectMetadataP meta) {
            synchronized (_meta) {
                _meta.put(obj, new CommittedMeta(meta, obj.estimateSize()));
            }
        }

        @Override
        public void delete(JObject<?> obj) {
            synchronized (_deleted) {
                _deleted.put(obj, obj.estimateSize());
            }
        }

        public long calculateTotalSize() {
            if (_size >= 0) return _size;
            long out = 0;
            for (var c : _committed.values())
                out += c.size;
            for (var c : _meta.values())
                out += c.size;
            for (var c : _deleted.entrySet())
                out += c.getValue();
            _size = out;
            return _size;
        }

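        // Folds a newer bundle into this one: this bundle takes over the newer
        // bundle's id, and per-object entries from the newer bundle supersede
        // (or combine with) the ones recorded here.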
        public void compress(TxBundle other) {
            if (_txId >= other._txId)
                throw new IllegalArgumentException("Compressing an older bundle into newer");

            _txId = other._txId;
            _size = -1;

            for (var d : other._deleted.entrySet()) {
                _committed.remove(d.getKey());
                _meta.remove(d.getKey());
                _deleted.put(d.getKey(), d.getValue());
            }

            for (var c : other._committed.entrySet()) {
                _committed.put(c.getKey(), c.getValue());
                _meta.remove(c.getKey());
                _deleted.remove(c.getKey());
            }

            for (var m : other._meta.entrySet()) {
                var deleted = _deleted.remove(m.getKey());
                if (deleted != null) {
                    _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, null, m.getKey().estimateSize()));
                    continue;
                }
                var committed = _committed.remove(m.getKey());
                if (committed != null) {
                    _committed.put(m.getKey(), new CommittedEntry(m.getValue().newMeta, committed.newData, m.getKey().estimateSize()));
                    continue;
                }
                _meta.put(m.getKey(), m.getValue());
            }
        }

        private record CommittedEntry(ObjectMetadataP newMeta, JObjectDataP newData, int size) {}

        private record CommittedMeta(ObjectMetadataP newMeta, int size) {}

        private record Deleted(JObject<?> handle) {}
    }
}
@@ -1,63 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import org.apache.commons.codec.digest.DigestUtils;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.BasicConstraints;
import org.bouncycastle.cert.CertIOException;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.math.BigInteger;
import java.security.*;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Calendar;
import java.util.Date;

public class CertificateTools {

    public static X509Certificate certFromBytes(byte[] bytes) throws CertificateException {
        CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
        InputStream in = new ByteArrayInputStream(bytes);
        return (X509Certificate) certFactory.generateCertificate(in);
    }

    public static KeyPair generateKeyPair() throws NoSuchAlgorithmException {
        KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
        keyGen.initialize(2048); //FIXME:
        return keyGen.generateKeyPair();
    }

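    // Builds a self-signed X.509 certificate for the given subject: the serial
    // number is derived from a SHA-256 of the subject name, validity is set to
    // 999 years, and BasicConstraints marks it as a non-CA certificate.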
    public static X509Certificate generateCertificate(KeyPair keyPair, String subject) throws CertificateException, CertIOException, NoSuchAlgorithmException, OperatorCreationException {
        Provider bcProvider = new BouncyCastleProvider();
        Security.addProvider(bcProvider);

        Date startDate = new Date();

        X500Name cnName = new X500Name("CN=" + subject);
        BigInteger certSerialNumber = new BigInteger(DigestUtils.sha256(subject));

        Calendar calendar = Calendar.getInstance();
        calendar.setTime(startDate);
        calendar.add(Calendar.YEAR, 999);

        Date endDate = calendar.getTime();

        ContentSigner contentSigner = new JcaContentSignerBuilder("SHA256WithRSA").build(keyPair.getPrivate());

        JcaX509v3CertificateBuilder certBuilder = new JcaX509v3CertificateBuilder(cnName, certSerialNumber, startDate, endDate, cnName, keyPair.getPublic());

        BasicConstraints basicConstraints = new BasicConstraints(false);
        certBuilder.addExtension(new ASN1ObjectIdentifier("2.5.29.19"), true, basicConstraints);

        return new JcaX509CertificateConverter().setProvider(bcProvider).getCertificate(certBuilder.build(contentSigner));
    }
}
@@ -1,10 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;

import java.util.UUID;

public interface ConflictResolver {
    void resolve(UUID conflictHost, ObjectHeader conflictHeader, JObjectData conflictData, JObject<?> conflictSource);
}
@@ -1,277 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.dhfs.objects.repository.peersync.PeerSyncApiClientDynamic;
import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo;
import com.usatiuk.dhfs.objects.repository.webapi.AvailablePeerInfo;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import io.quarkus.scheduler.Scheduled;
import io.smallrye.common.annotation.Blocking;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import lombok.Getter;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.IOException;
import java.security.cert.CertificateException;
import java.util.*;
import java.util.concurrent.*;

@ApplicationScoped
public class PeerManager {
    private final TransientPeersState _transientPeersState = new TransientPeersState();
    private final ConcurrentMap<UUID, TransientPeerState> _seenButNotAdded = new ConcurrentHashMap<>();
    // FIXME: Ideally not call them on every ping
    private final ArrayList<ConnectionEventListener> _connectedListeners = new ArrayList<>();
    private final ArrayList<ConnectionEventListener> _disconnectedListeners = new ArrayList<>();
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    SyncHandler syncHandler;
    @Inject
    RpcClientFactory rpcClientFactory;
    @Inject
    PeerSyncApiClientDynamic peerSyncApiClient;
    @ConfigProperty(name = "dhfs.objects.sync.ping.timeout")
    long pingTimeout;
    private ExecutorService _heartbeatExecutor;
    @Getter
    private boolean _ready = false;

    // Note: keep priority updated with below
    void init(@Observes @Priority(600) StartupEvent event) throws IOException {
        _heartbeatExecutor = Executors.newVirtualThreadPerTaskExecutor();

        // Note: newly added hosts aren't in _transientPeersState
        // but that's ok as they don't have initialSyncDone set
        for (var h : persistentPeerDataService.getHostUuids())
            _transientPeersState.runWriteLocked(d -> d.get(h));

        _ready = true;
    }

    void shutdown(@Observes @Priority(50) ShutdownEvent event) throws IOException {
        _ready = false;
    }

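    // Periodic heartbeat: ping every known host on a schedule and update its
    // reachability state, triggering connect/disconnect listeners on changes.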
    @Scheduled(every = "${dhfs.objects.reconnect_interval}", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
    @Blocking
    public void tryConnectAll() {
        if (!_ready) return;
        try {
            _heartbeatExecutor.invokeAll(persistentPeerDataService.getHostUuids()
                    .stream()
                    .<Callable<Void>>map(host -> () -> {
                        try {
                            if (isReachable(host))
                                Log.trace("Heartbeat: " + host);
                            else
                                Log.debug("Trying to connect to " + host);
                            if (pingCheck(host))
                                handleConnectionSuccess(host);
                            else
                                handleConnectionError(host);
                        } catch (Exception e) {
                            Log.error("Failed to connect to " + host, e);
                        }
                        return null;
                    }).toList(), 30, TimeUnit.SECONDS); //FIXME:
        } catch (InterruptedException iex) {
            Log.error("Heartbeat was interrupted");
        }
    }

    // Note: registrations should be completed with Priority < 600
    public void registerConnectEventListener(ConnectionEventListener listener) {
        if (_ready) throw new IllegalStateException("Already initialized");
        synchronized (_connectedListeners) {
            _connectedListeners.add(listener);
        }
    }

    // Note: registrations should be completed with Priority < 600
    public void registerDisconnectEventListener(ConnectionEventListener listener) {
        if (_ready) throw new IllegalStateException("Already initialized");
        synchronized (_disconnectedListeners) {
            _disconnectedListeners.add(listener);
        }
    }

    public void handleConnectionSuccess(UUID host) {
        if (!_ready) return;

        boolean wasReachable = isReachable(host);

        boolean shouldSyncObj = persistentPeerDataService.markInitialObjSyncDone(host);
        boolean shouldSyncOp = persistentPeerDataService.markInitialOpSyncDone(host);

        if (shouldSyncObj)
            syncHandler.pushInitialResyncObj(host);
        if (shouldSyncOp)
            syncHandler.pushInitialResyncOp(host);

        _transientPeersState.runWriteLocked(d -> {
            d.get(host).setReachable(true);
            return null;
        });

        if (wasReachable) return;

        Log.info("Connected to " + host);

        for (var l : _connectedListeners) {
            l.apply(host);
        }
    }

    public void handleConnectionError(UUID host) {
        boolean wasReachable = isReachable(host);

        if (wasReachable)
            Log.info("Lost connection to " + host);

        _transientPeersState.runWriteLocked(d -> {
            d.get(host).setReachable(false);
            return null;
        });

        for (var l : _disconnectedListeners) {
            l.apply(host);
        }
    }

    // FIXME:
    private boolean pingCheck(UUID host) {
        TransientPeerState state = _transientPeersState.runReadLocked(s -> s.getCopy(host));

        try {
            return rpcClientFactory.withObjSyncClient(host.toString(), state.getAddr(), state.getSecurePort(), pingTimeout, c -> {
                var ret = c.ping(PingRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build());
                if (!UUID.fromString(ret.getSelfUuid()).equals(host)) {
                    throw new IllegalStateException("Ping selfUuid returned " + ret.getSelfUuid() + " but expected " + host);
                }
                return true;
            });
        } catch (Exception e) {
            Log.debug("Host " + host + " is unreachable: " + e.getMessage() + " " + e.getCause());
            return false;
        }
    }

    public boolean isReachable(UUID host) {
        return _transientPeersState.runReadLocked(d -> d.get(host).isReachable());
    }

    public TransientPeerState getTransientState(UUID host) {
        return _transientPeersState.runReadLocked(d -> d.getCopy(host));
    }

    public List<UUID> getAvailableHosts() {
        return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream()
                .filter(e -> e.getValue().isReachable())
                .map(Map.Entry::getKey).toList());
    }

    public List<UUID> getUnavailableHosts() {
        return _transientPeersState.runReadLocked(d -> d.getStates().entrySet().stream()
                .filter(e -> !e.getValue().isReachable())
                .map(Map.Entry::getKey).toList());
    }

    public HostStateSnapshot getHostStateSnapshot() {
        ArrayList<UUID> available = new ArrayList<>();
        ArrayList<UUID> unavailable = new ArrayList<>();
        _transientPeersState.runReadLocked(d -> {
                    for (var v : d.getStates().entrySet()) {
                        if (v.getValue().isReachable())
                            available.add(v.getKey());
                        else
                            unavailable.add(v.getKey());
                    }
                    return null;
                }
        );
        return new HostStateSnapshot(available, unavailable);
    }

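    // Address updates from peer discovery: hosts that are not yet part of the
    // cluster are parked in _seenButNotAdded until addRemoteHost() is called.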
    public void notifyAddr(UUID host, String addr, Integer port, Integer securePort) {
        if (host.equals(persistentPeerDataService.getSelfUuid())) {
            return;
        }

        var state = new TransientPeerState();
        state.setAddr(addr);
        state.setPort(port);
        state.setSecurePort(securePort);

        if (!persistentPeerDataService.existsHost(host)) {
            var prev = _seenButNotAdded.put(host, state);
            // Needed for tests
            if (prev == null)
                Log.debug("Ignoring new address from unknown host " + host + ": addr=" + addr + " port=" + port);
            return;
        } else {
            _seenButNotAdded.remove(host);
        }

        _transientPeersState.runWriteLocked(d -> {
            // Log.trace("Updating connection info for " + host + ": addr=" + addr + " port=" + port);
            d.get(host).setAddr(addr);
            d.get(host).setPort(port);
            d.get(host).setSecurePort(securePort);
            return null;
        });
    }

    public void removeRemoteHost(UUID host) {
        persistentPeerDataService.removeHost(host);
        // Race?
        _transientPeersState.runWriteLocked(d -> {
            d.getStates().remove(host);
            return null;
        });
    }

    public void addRemoteHost(UUID host) {
        if (!_seenButNotAdded.containsKey(host)) {
            throw new IllegalStateException("Host " + host + " is not seen");
        }
        if (persistentPeerDataService.existsHost(host)) {
            throw new IllegalStateException("Host " + host + " is already added");
        }

        var state = _seenButNotAdded.get(host);

        // FIXME: race?

        var info = peerSyncApiClient.getSelfInfo(state.getAddr(), state.getPort());

        try {
            persistentPeerDataService.addHost(
                    new PersistentPeerInfo(UUID.fromString(info.selfUuid()),
                            CertificateTools.certFromBytes(Base64.getDecoder().decode(info.cert()))));
            Log.info("Added host: " + host.toString());
        } catch (CertificateException e) {
            throw new RuntimeException(e);
        }
    }

    public Collection<AvailablePeerInfo> getSeenButNotAddedHosts() {
        return _seenButNotAdded.entrySet().stream()
                .filter(e -> !persistentPeerDataService.existsHost(e.getKey()))
                .map(e -> new AvailablePeerInfo(e.getKey().toString(), e.getValue().getAddr(), e.getValue().getPort()))
                .toList();
    }

    @FunctionalInterface
    public interface ConnectionEventListener {
        void apply(UUID host);
    }

    public record HostStateSnapshot(List<UUID> available, List<UUID> unavailable) {
    }

}
@@ -1,361 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.dhfs.utils.SerializationHelper;
import com.usatiuk.dhfs.ShutdownChecker;
import com.usatiuk.dhfs.objects.jrepository.*;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory;
import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectoryLocal;
import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo;
import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Nullable;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.SerializationUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.ExecutorService;

import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

@ApplicationScoped
public class PersistentPeerDataService {
    final String dataFileName = "hosts";
    @ConfigProperty(name = "dhfs.objects.root")
    String dataRoot;
    @Inject
    PeerTrustManager peerTrustManager;
    @Inject
    JObjectManager jObjectManager;
    @Inject
    ExecutorService executorService;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @Inject
    RpcClientFactory rpcClientFactory;
    @Inject
    ShutdownChecker shutdownChecker;
    @Inject
    JObjectTxManager jObjectTxManager;
    @Inject
    SoftJObjectFactory softJObjectFactory;
    SoftJObject<PeerDirectory> peerDirectory;
    SoftJObject<PeerDirectoryLocal> peerDirectoryLocal;
    private PersistentRemoteHosts _persistentData = new PersistentRemoteHosts();
    private UUID _selfUuid;

    void init(@Observes @Priority(300) StartupEvent event) throws IOException {
        Paths.get(dataRoot).toFile().mkdirs();
        Log.info("Initializing with root " + dataRoot);
        if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
            Log.info("Reading hosts");
            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
        } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) {
            Log.warn("Reading hosts from backup");
            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName + ".bak")));
        }
        _selfUuid = _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfUuid);

        if (_persistentData.runReadLocked(d -> d.getSelfCertificate() == null)) {
            jObjectTxManager.executeTxAndFlush(() -> {
                _persistentData.runWriteLocked(d -> {
                    try {
                        Log.info("Generating a key pair, please wait");
                        d.setSelfKeyPair(CertificateTools.generateKeyPair());
                        d.setSelfCertificate(CertificateTools.generateCertificate(d.getSelfKeyPair(), _selfUuid.toString()));
                    } catch (Exception e) {
                        throw new RuntimeException("Failed generating cert", e);
                    }
                    return null;
                });
                var newpd = new PeerDirectory();
                jObjectManager.put(new PersistentPeerInfo(_selfUuid, getSelfCertificate()), Optional.of(PeerDirectory.PeerDirectoryObjName));
                newpd.getPeers().add(_selfUuid);
                jObjectManager.put(newpd, Optional.empty());
                jObjectManager.put(new PeerDirectoryLocal(), Optional.empty());
            });
        }

        peerDirectory = softJObjectFactory.create(PeerDirectory.class, PeerDirectory.PeerDirectoryObjName);
        peerDirectoryLocal = softJObjectFactory.create(PeerDirectoryLocal.class, PeerDirectoryLocal.PeerDirectoryLocalObjName);

        if (!shutdownChecker.lastShutdownClean()) {
            _persistentData.getData().getIrregularShutdownCounter().addAndGet(1);
            jObjectTxManager.executeTxAndFlush(() -> {
                peerDirectoryLocal.get().rwLock();
                peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                try {
                    peerDirectoryLocal.get().getData().getInitialObjSyncDone().clear();
                    peerDirectoryLocal.get().bumpVer();
                } finally {
                    peerDirectoryLocal.get().rwUnlock();
                }
            });
        }

        jObjectManager.registerWriteListener(PersistentPeerInfo.class, this::pushPeerUpdates);
        jObjectManager.registerWriteListener(PeerDirectory.class, this::pushPeerUpdates);

        // FIXME: Warn on failed resolves?
        peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
            peerTrustManager.reloadTrustManagerHosts(getHosts());
            return null;
        });

        Files.writeString(Paths.get(dataRoot, "self_uuid"), _selfUuid.toString());
        Log.info("Self uuid is: " + _selfUuid.toString());
        writeData();
    }

    void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException {
        Log.info("Saving hosts");
        writeData();
        Log.info("Shutdown");
    }

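    // Persists the hosts file, first rotating the previous version to a ".bak"
    // copy so that a crash mid-write leaves a readable backup behind.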
    private void writeData() {
        try {
            if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists())
                Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING);
            Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData));
        } catch (IOException iex) {
            Log.error("Error writing persistent hosts data", iex);
            throw new RuntimeException(iex);
        }
    }

    private void pushPeerUpdates() {
        pushPeerUpdates(null);
    }

    private void pushPeerUpdates(@Nullable JObject<?> obj) {
        if (obj != null)
            Log.info("Scheduling certificate update after " + obj.getMeta().getName() + " was updated");
        executorService.submit(() -> {
            updateCerts();
            invalidationQueueService.pushInvalidationToAll(PeerDirectory.PeerDirectoryObjName);
            for (var p : peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList()))
                invalidationQueueService.pushInvalidationToAll(PersistentPeerInfo.getNameFromUuid(p));
        });
    }

    private JObject<PersistentPeerInfo> getPeer(UUID uuid) {
        var got = jObjectManager.get(PersistentPeerInfo.getNameFromUuid(uuid)).orElseThrow(() -> new IllegalStateException("Peer " + uuid + " not found"));
        got.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
            if (d == null) throw new IllegalStateException("Could not resolve peer " + uuid);
            if (!(d instanceof PersistentPeerInfo))
                throw new IllegalStateException("Peer " + uuid + " is of wrong type!");
            return null;
        });
        return (JObject<PersistentPeerInfo>) got;
    }

    private List<PersistentPeerInfo> getPeersSnapshot() {
        return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
                (m, d) -> d.getPeers().stream().map(u -> {
                    try {
                        return getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2);
                    } catch (Exception e) {
                        Log.warn("Error making snapshot of peer " + u, e);
                        return null;
                    }
                }).filter(Objects::nonNull).toList());
    }

    public UUID getSelfUuid() {
        if (_selfUuid == null)
            throw new IllegalStateException();
        else return _selfUuid;
    }

    public String getUniqueId() {
        return String.valueOf(_selfUuid) +
                _persistentData.getData().getIrregularShutdownCounter() +
                "_" +
                _persistentData.getData().getSelfCounter().addAndGet(1);
    }

    public PersistentPeerInfo getInfo(UUID name) {
        return getPeer(name).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d);
    }

    public List<PersistentPeerInfo> getHosts() {
        return getPeersSnapshot().stream().filter(i -> !i.getUuid().equals(_selfUuid)).toList();
    }

    public List<UUID> getHostUuids() {
        return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().filter(i -> !i.equals(_selfUuid)).toList());
    }

    public List<UUID> getHostUuidsAndSelf() {
        return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().stream().toList());
    }

    public List<PersistentPeerInfo> getHostsNoNulls() {
        for (int i = 0; i < 5; i++) {
            try {
                return peerDirectory.get()
                        .runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY,
                                (m, d) -> d.getPeers().stream()
                                        .map(u -> getPeer(u).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m2, d2) -> d2))
                                        .filter(e -> !e.getUuid().equals(_selfUuid)).toList());
            } catch (Exception e) {
                Log.warn("Error when making snapshot of hosts: " + e.getMessage());
                try {
                    Thread.sleep(i * 2);
                } catch (InterruptedException ignored) {
                }
            }
        }
        throw new StatusRuntimeException(Status.ABORTED.withDescription("Could not make a snapshot of peers in 5 tries!"));
    }

    public boolean addHost(PersistentPeerInfo persistentPeerInfo) {
        return jObjectTxManager.executeTx(() -> {
            if (persistentPeerInfo.getUuid().equals(_selfUuid)) return false;

            boolean added = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
                boolean addedInner = d.getPeers().add(persistentPeerInfo.getUuid());
                if (addedInner) {
                    jObjectManager.put(persistentPeerInfo, Optional.of(m.getName()));
                    b.apply();
                }
                return addedInner;
            });
            return added;
        });
    }

    public boolean removeHost(UUID host) {
        return jObjectTxManager.executeTx(() -> {
            boolean removed = peerDirectory.get().runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d, b, v) -> {
                boolean removedInner = d.getPeers().remove(host);
Log.info("Removing host: " + host + (removedInner ? " removed" : " did not exists"));
|
||||
                if (removedInner) {
                    peerDirectoryLocal.get().rwLock();
                    peerDirectoryLocal.get().tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                    try {
                        peerDirectoryLocal.get().getData().getInitialObjSyncDone().remove(host);
                        peerDirectoryLocal.get().getData().getInitialOpSyncDone().remove(host);
                        peerDirectoryLocal.get().bumpVer();
                    } finally {
                        peerDirectoryLocal.get().rwUnlock();
                    }
                    getPeer(host).runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (mp, dp, bp, vp) -> {
                        mp.removeRef(m.getName());
                        return null;
                    });
                    b.apply();
                }
                return removedInner;
            });
            return removed;
        });
    }

    private void updateCerts() {
        try {
            peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                peerTrustManager.reloadTrustManagerHosts(getHostsNoNulls());
                // Fixme:? I don't think it should be needed with custom trust store
                // but it doesn't work?
                rpcClientFactory.dropCache();
                return null;
            });
        } catch (Exception ex) {
            Log.warn("Error when refreshing certificates, will retry: " + ex.getMessage());
            pushPeerUpdates();
        }
    }

    public boolean existsHost(UUID uuid) {
        return peerDirectory.get().runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.getPeers().contains(uuid));
    }

    public PersistentPeerInfo getHost(UUID uuid) {
        if (!existsHost(uuid))
            throw new StatusRuntimeException(Status.NOT_FOUND);
        return getPeer(uuid).runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d);
    }

    public KeyPair getSelfKeypair() {
        return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfKeyPair);
    }

    public X509Certificate getSelfCertificate() {
        return _persistentData.runReadLocked(PersistentRemoteHostsData::getSelfCertificate);
    }

    // Returns true if host's initial sync wasn't done before, and marks it as done
    public boolean markInitialOpSyncDone(UUID connectedHost) {
        return jObjectTxManager.executeTx(() -> {
            peerDirectoryLocal.get().rwLock();
            try {
                peerDirectoryLocal.get().local();
                boolean contained = peerDirectoryLocal.get().getData().getInitialOpSyncDone().contains(connectedHost);

                if (!contained)
                    peerDirectoryLocal.get().local().mutate(new JMutator<PeerDirectoryLocal>() {
                        @Override
                        public boolean mutate(PeerDirectoryLocal object) {
                            object.getInitialOpSyncDone().add(connectedHost);
                            return true;
                        }

                        @Override
                        public void revert(PeerDirectoryLocal object) {
                            object.getInitialOpSyncDone().remove(connectedHost);
                        }
                    });
                return !contained;
            } finally {
                peerDirectoryLocal.get().rwUnlock();
            }
        });
    }

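    // Same contract as markInitialOpSyncDone, but for the initial object sync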
    public boolean markInitialObjSyncDone(UUID connectedHost) {
        return jObjectTxManager.executeTx(() -> {
            peerDirectoryLocal.get().rwLock();
            try {
                peerDirectoryLocal.get().local();
                boolean contained = peerDirectoryLocal.get().getData().getInitialObjSyncDone().contains(connectedHost);

                if (!contained)
                    peerDirectoryLocal.get().local().mutate(new JMutator<PeerDirectoryLocal>() {
                        @Override
                        public boolean mutate(PeerDirectoryLocal object) {
                            object.getInitialObjSyncDone().add(connectedHost);
                            return true;
                        }

                        @Override
                        public void revert(PeerDirectoryLocal object) {
                            object.getInitialObjSyncDone().remove(connectedHost);
                        }
                    });
                return !contained;
            } finally {
                peerDirectoryLocal.get().rwUnlock();
            }
        });
    }

}
@@ -1,40 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import lombok.Getter;

import java.io.Serial;
import java.io.Serializable;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PersistentRemoteHosts implements Serializable {
    @Serial
    private static final long serialVersionUID = 1;

    @Getter
    private final PersistentRemoteHostsData _data = new PersistentRemoteHostsData();
    private final ReadWriteLock _lock = new ReentrantReadWriteLock();

    public <R> R runReadLocked(PersistentRemoteHostsFn<R> fn) {
        _lock.readLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.readLock().unlock();
        }
    }

    public <R> R runWriteLocked(PersistentRemoteHostsFn<R> fn) {
        _lock.writeLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.writeLock().unlock();
        }
    }

    @FunctionalInterface
    public interface PersistentRemoteHostsFn<R> {
        R apply(PersistentRemoteHostsData hostsData);
    }
}
@@ -1,29 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import lombok.Getter;
import lombok.Setter;

import java.io.Serial;
import java.io.Serializable;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

public class PersistentRemoteHostsData implements Serializable {
    @Serial
    private static final long serialVersionUID = 1L;

    @Getter
    private final UUID _selfUuid = UUID.randomUUID();
    @Getter
    private final AtomicLong _selfCounter = new AtomicLong();
    @Getter
    private final AtomicLong _irregularShutdownCounter = new AtomicLong();
    @Getter
    @Setter
    private X509Certificate _selfCertificate = null;
    @Getter
    @Setter
    private KeyPair _selfKeyPair = null;
}
@@ -1,174 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.google.common.collect.Maps;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jrepository.*;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.objects.repository.opsupport.Op;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;

import javax.annotation.Nullable;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

@ApplicationScoped
public class RemoteObjectServiceClient {
    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Inject
    RpcClientFactory rpcClientFactory;

    @Inject
    JObjectManager jObjectManager;

    @Inject
    SyncHandler syncHandler;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @Inject
    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
    @Inject
    ProtoSerializer<OpPushPayload, Op> opProtoSerializer;
    @Inject
    JObjectTxManager jObjectTxManager;

    public Pair<ObjectHeader, JObjectDataP> getSpecificObject(UUID host, String name) {
        return rpcClientFactory.withObjSyncClient(host, client -> {
            var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(name).build());
            return Pair.of(reply.getObject().getHeader(), reply.getObject().getContent());
        });
    }

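    // Downloads an object body: only peers whose changelog entry matches our
    // version are considered as download targets (any known peer, if we have no
    // version yet), and an unexpected received changelog is routed through the
    // regular remote-update/conflict-resolution path before accepting the data.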
    public JObjectDataP getObject(JObject<?> jObject) {
        jObject.assertRwLock();

        var targets = jObject.runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d) -> {
            var ourVersion = md.getOurVersion();
            if (ourVersion >= 1)
                return md.getRemoteCopies().entrySet().stream()
                        .filter(entry -> entry.getValue().equals(ourVersion))
                        .map(Map.Entry::getKey).toList();
            else
                return persistentPeerDataService.getHostUuids();
        });

        if (targets.isEmpty())
            throw new IllegalStateException("No targets for object " + jObject.getMeta().getName());

        Log.info("Downloading object " + jObject.getMeta().getName() + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", ")));

        return rpcClientFactory.withObjSyncClient(targets, client -> {
            var reply = client.getObject(GetObjectRequest.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).setName(jObject.getMeta().getName()).build());

            var receivedMap = new HashMap<UUID, Long>();
            for (var e : reply.getObject().getHeader().getChangelog().getEntriesList()) {
                receivedMap.put(UUID.fromString(e.getHost()), e.getVersion());
            }

            return jObjectTxManager.executeTx(() -> {
                return jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, d, b, v) -> {
                    var unexpected = !Objects.equals(
                            Maps.filterValues(md.getChangelog(), val -> val != 0),
                            Maps.filterValues(receivedMap, val -> val != 0));

                    if (unexpected) {
                        try {
                            syncHandler.handleOneUpdate(UUID.fromString(reply.getSelfUuid()), reply.getObject().getHeader());
                        } catch (SyncHandler.OutdatedUpdateException ignored) {
                            Log.info("Outdated update of " + md.getName() + " from " + reply.getSelfUuid());
                            invalidationQueueService.pushInvalidationToOne(UUID.fromString(reply.getSelfUuid()), md.getName()); // True?
                            throw new StatusRuntimeException(Status.ABORTED.withDescription("Received outdated object version"));
                        } catch (Exception e) {
                            Log.error("Received unexpected object version from " + reply.getSelfUuid()
                                    + " for " + reply.getObject().getHeader().getName() + " and conflict resolution failed", e);
                            throw new StatusRuntimeException(Status.ABORTED.withDescription("Received unexpected object version"));
                        }
                    }

                    return reply.getObject().getContent();
                });
            });
        });
    }

    @Nullable
    public IndexUpdateReply notifyUpdate(JObject<?> obj, UUID host) {
        var builder = IndexUpdatePush.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString());

        var header = obj
                .runReadLocked(
                        obj.getMeta().getKnownClass().isAnnotationPresent(PushResolution.class)
                                ? JObjectManager.ResolutionStrategy.LOCAL_ONLY
                                : JObjectManager.ResolutionStrategy.NO_RESOLUTION,
                        (m, d) -> {
                            if (obj.getMeta().isDeleted()) return null;
                            if (m.getKnownClass().isAnnotationPresent(PushResolution.class) && d == null)
                                Log.warn("Object " + m.getName() + " is marked as PushResolution but no resolution found");
                            if (m.getKnownClass().isAnnotationPresent(PushResolution.class))
                                return m.toRpcHeader(dataProtoSerializer.serialize(d));
                            else
                                return m.toRpcHeader();
                        });
        if (header == null) return null;
        jObjectTxManager.executeTx(obj::markSeen);
        builder.setHeader(header);

        var send = builder.build();

        return rpcClientFactory.withObjSyncClient(host, client -> client.indexUpdate(send));
    }

    public OpPushReply pushOps(List<Op> ops, String queueName, UUID host) {
        for (Op op : ops) {
            for (var ref : op.getEscapedRefs()) {
                jObjectTxManager.executeTx(() -> {
                    jObjectManager.get(ref).ifPresent(JObject::markSeen);
                });
            }
        }
        var builder = OpPushMsg.newBuilder()
                .setSelfUuid(persistentPeerDataService.getSelfUuid().toString())
                .setQueueId(queueName);
        for (var op : ops)
            builder.addMsg(opProtoSerializer.serialize(op));
        return rpcClientFactory.withObjSyncClient(host, client -> client.opPush(builder.build()));
    }

    public Collection<CanDeleteReply> canDelete(Collection<UUID> targets, String object, Collection<String> ourReferrers) {
        ConcurrentLinkedDeque<CanDeleteReply> results = new ConcurrentLinkedDeque<>();
        Log.trace("Asking canDelete for " + object + " from " + targets.stream().map(UUID::toString).collect(Collectors.joining(", ")));
        try (var executor = Executors.newVirtualThreadPerTaskExecutor()) {
            try {
                executor.invokeAll(targets.stream().<Callable<Void>>map(h -> () -> {
                    try {
                        var req = CanDeleteRequest.newBuilder()
                                .setSelfUuid(persistentPeerDataService.getSelfUuid().toString())
                                .setName(object);
                        req.addAllOurReferrers(ourReferrers);
                        var res = rpcClientFactory.withObjSyncClient(h, client -> client.canDelete(req.build()));
                        if (res != null)
                            results.add(res);
                    } catch (Exception e) {
                        Log.debug("Error when asking canDelete for object " + object, e);
                    }
                    return null;
                }).toList());
            } catch (InterruptedException e) {
                Log.warn("Interrupted waiting for canDelete for object " + object);
            }
            if (!executor.shutdownNow().isEmpty())
                Log.warn("Didn't ask all targets when asking canDelete for " + object);
        }
        return results;
    }
}
@@ -1,184 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.repository.autosync.AutoSyncProcessor;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.objects.repository.opsupport.Op;
import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.grpc.GrpcService;
import io.quarkus.logging.Log;
import io.smallrye.common.annotation.Blocking;
import io.smallrye.mutiny.Uni;
import jakarta.annotation.security.RolesAllowed;
import jakarta.inject.Inject;

import java.util.UUID;

// Note: RunOnVirtualThread hangs somehow
@GrpcService
@RolesAllowed("cluster-member")
public class RemoteObjectServiceServer implements DhfsObjectSyncGrpc {
    @Inject
    SyncHandler syncHandler;

    @Inject
    JObjectManager jObjectManager;

    @Inject
    PeerManager remoteHostManager;

    @Inject
    AutoSyncProcessor autoSyncProcessor;

    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Inject
    InvalidationQueueService invalidationQueueService;

    @Inject
    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
    @Inject
    ProtoSerializer<OpPushPayload, Op> opProtoSerializer;

    @Inject
    OpObjectRegistry opObjectRegistry;

    @Inject
    JObjectTxManager jObjectTxManager;

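    // Serves an object body to a peer: marks the object and its refs as seen,
    // serializes the data, and completes the reply only after the local
    // transaction has been persisted (commitFenceAsync).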
@Override
|
||||
@Blocking
|
||||
public Uni<GetObjectReply> getObject(GetObjectRequest request) {
|
||||
if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
|
||||
if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid())))
|
||||
throw new StatusRuntimeException(Status.UNAUTHENTICATED);
|
||||
|
||||
Log.info("<-- getObject: " + request.getName() + " from " + request.getSelfUuid());
|
||||
|
||||
var obj = jObjectManager.get(request.getName()).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
|
||||
|
||||
        // Does @Blocking break this?
        return Uni.createFrom().emitter(emitter -> {
            var replyObj = jObjectTxManager.executeTx(() -> {
                // Obj.markSeen before markSeen of its children
                obj.markSeen();
                return obj.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (meta, data) -> {
                    if (meta.isOnlyLocal())
                        throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Trying to get local-only object"));
                    if (data == null) {
                        Log.info("<-- getObject FAIL: " + request.getName() + " from " + request.getSelfUuid());
                        throw new StatusRuntimeException(Status.ABORTED.withDescription("Not available locally"));
                    }
                    data.extractRefs().forEach(ref ->
                            jObjectManager.get(ref)
                                    .orElseThrow(() -> new IllegalStateException("Non-hydrated refs for local object?"))
                                    .markSeen());

                    return ApiObject.newBuilder()
                            .setHeader(obj.getMeta().toRpcHeader())
                            .setContent(dataProtoSerializer.serialize(obj.getData())).build();
                });
            });
            var ret = GetObjectReply.newBuilder()
                    .setSelfUuid(persistentPeerDataService.getSelfUuid().toString())
                    .setObject(replyObj).build();
            // TODO: Could this cause problems if we wait for too long?
            obj.commitFenceAsync(() -> emitter.complete(ret));
        });
    }

    @Override
    @Blocking
    public Uni<CanDeleteReply> canDelete(CanDeleteRequest request) {
        if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
        if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid())))
            throw new StatusRuntimeException(Status.UNAUTHENTICATED);

        Log.info("<-- canDelete: " + request.getName() + " from " + request.getSelfUuid());

        var builder = CanDeleteReply.newBuilder();

        var obj = jObjectManager.get(request.getName());

        builder.setSelfUuid(persistentPeerDataService.getSelfUuid().toString());
        builder.setObjName(request.getName());

        if (obj.isPresent()) {
            try {
                boolean tryUpdate = obj.get().runReadLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d) -> {
                    if (m.isDeleted() && !m.isDeletionCandidate())
                        throw new IllegalStateException("Object " + m.getName() + " is deleted but not a deletion candidate");
                    builder.setDeletionCandidate(m.isDeletionCandidate());
                    builder.addAllReferrers(m.getReferrers());
                    return m.isDeletionCandidate() && !m.isDeleted();
                });
                // FIXME
                // if (tryUpdate) {
                //     obj.get().runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, b, v) -> {
                //         return null;
                //     });
                // }
            } catch (DeletedObjectAccessException dox) {
                builder.setDeletionCandidate(true);
            }
        } else {
            builder.setDeletionCandidate(true);
        }

        var ret = builder.build();

        if (!ret.getDeletionCandidate())
            for (var rr : request.getOurReferrersList())
                autoSyncProcessor.add(rr);

        return Uni.createFrom().item(ret);
    }

    @Override
    @Blocking
    public Uni<IndexUpdateReply> indexUpdate(IndexUpdatePush request) {
        if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
        if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid())))
            throw new StatusRuntimeException(Status.UNAUTHENTICATED);

        // Log.info("<-- indexUpdate: " + request.getHeader().getName());
        return jObjectTxManager.executeTxAndFlush(() -> {
            return Uni.createFrom().item(syncHandler.handleRemoteUpdate(request));
        });
    }

    @Override
    @Blocking
    public Uni<OpPushReply> opPush(OpPushMsg request) {
        if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
        if (!persistentPeerDataService.existsHost(UUID.fromString(request.getSelfUuid())))
            throw new StatusRuntimeException(Status.UNAUTHENTICATED);

        try {
            var objs = request.getMsgList().stream().map(opProtoSerializer::deserialize).toList();
            jObjectTxManager.executeTxAndFlush(() -> {
                opObjectRegistry.acceptExternalOps(request.getQueueId(), UUID.fromString(request.getSelfUuid()), objs);
            });
        } catch (Exception e) {
            Log.error(e, e);
            throw e;
        }
        return Uni.createFrom().item(OpPushReply.getDefaultInstance());
    }

    @Override
    @Blocking
    public Uni<PingReply> ping(PingRequest request) {
        if (request.getSelfUuid().isBlank()) throw new StatusRuntimeException(Status.INVALID_ARGUMENT);

        return Uni.createFrom().item(PingReply.newBuilder().setSelfUuid(persistentPeerDataService.getSelfUuid().toString()).build());
    }
}
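Note that the getObject path above completes its Uni only from the commitFenceAsync callback, i.e. after the transaction that marked the object and its refs as seen is durable. A minimal sketch of that reply-after-durability pattern; awaitDurable and the single-thread notifier are hypothetical stand-ins, not the actual DHFS transaction manager API:

// Reply-after-durability sketch; awaitDurable is a hypothetical stand-in
// for the real transaction manager's flush tracking.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class CommitFenceSketch {
    private final ExecutorService notifier = Executors.newSingleThreadExecutor();

    // Run `callback` only once everything up to `txId` is known durable.
    void commitFenceAsync(long txId, Runnable callback) {
        notifier.submit(() -> {
            awaitDurable(txId);
            callback.run();
        });
    }

    private void awaitDurable(long txId) {
        // stand-in: block here until the write-ahead log is flushed past txId
    }

    // Usage: complete the RPC future only after the commit fence fires.
    CompletableFuture<String> replyAfterCommit(long txId, String reply) {
        var fut = new CompletableFuture<String>();
        commitFenceAsync(txId, () -> fut.complete(reply));
        return fut;
    }
}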
@@ -1,69 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.dhfs.objects.repository.peertrust.PeerTrustManager;
import io.grpc.ChannelCredentials;
import io.grpc.ManagedChannel;
import io.grpc.TlsChannelCredentials;
import io.grpc.netty.NettyChannelBuilder;
import io.quarkus.runtime.ShutdownEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;

import javax.net.ssl.KeyManagerFactory;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

//FIXME: Leaks!
@ApplicationScoped
public class RpcChannelFactory {
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    PeerTrustManager peerTrustManager;
    private ConcurrentMap<SecureChannelKey, ManagedChannel> _secureChannelCache = new ConcurrentHashMap<>();

    void shutdown(@Observes @Priority(100000) ShutdownEvent event) {
        for (var c : _secureChannelCache.values()) c.shutdownNow();
    }

    private ChannelCredentials getChannelCredentials() {
        try {
            KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
            ks.load(null, null);

            ks.setKeyEntry("clientkey", persistentPeerDataService.getSelfKeypair().getPrivate(), null, new Certificate[]{persistentPeerDataService.getSelfCertificate()});

            KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            keyManagerFactory.init(ks, null);

            ChannelCredentials creds = TlsChannelCredentials.newBuilder().trustManager(peerTrustManager).keyManager(keyManagerFactory.getKeyManagers()).build();
            return creds;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    ManagedChannel getSecureChannel(String host, String address, int port) {
        var key = new SecureChannelKey(host, address, port);
        return _secureChannelCache.computeIfAbsent(key, (k) -> {
            return NettyChannelBuilder.forAddress(address, port, getChannelCredentials()).overrideAuthority(host).idleTimeout(10, TimeUnit.SECONDS).build();
        });
    }

    public void dropCache() {
        var oldS = _secureChannelCache;
        _secureChannelCache = new ConcurrentHashMap<>();
        oldS.values().forEach(ManagedChannel::shutdown);
    }

    private record SecureChannelKey(String host, String address, int port) {
    }

    private record InsecureChannelKey(String address, int port) {
    }
}
@@ -1,88 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

// TODO: Dedup this
@ApplicationScoped
public class RpcClientFactory {
    @ConfigProperty(name = "dhfs.objects.sync.timeout")
    long syncTimeout;

    @Inject
    PeerManager remoteHostManager;

    @Inject
    RpcChannelFactory rpcChannelFactory;
    // FIXME: Leaks!
    private ConcurrentMap<ObjSyncStubKey, DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub> _objSyncCache = new ConcurrentHashMap<>();

    public <R> R withObjSyncClient(Collection<UUID> targets, ObjectSyncClientFunction<R> fn) {
        var shuffledList = new ArrayList<>(targets);
        Collections.shuffle(shuffledList);
        for (UUID target : shuffledList) {
            try {
                return withObjSyncClient(target, fn);
            } catch (StatusRuntimeException e) {
                if (e.getStatus().getCode().equals(Status.UNAVAILABLE.getCode()))
                    Log.debug("Host " + target + " is unreachable: " + e.getMessage());
                else
                    Log.warn("When calling " + target + " " + e.getMessage());
            } catch (Exception e) {
                Log.warn("When calling " + target + " " + e.getMessage());
            }
        }
        throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("No reachable targets!"));
    }

    public <R> R withObjSyncClient(UUID target, ObjectSyncClientFunction<R> fn) {
        var hostinfo = remoteHostManager.getTransientState(target);
        boolean reachable = remoteHostManager.isReachable(target);

        if (hostinfo.getAddr() == null)
            throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Address for " + target + " not yet known"));

        if (!reachable)
            throw new StatusRuntimeException(Status.UNAVAILABLE.withDescription("Not known to be reachable: " + target));

        return withObjSyncClient(target.toString(), hostinfo.getAddr(), hostinfo.getSecurePort(), syncTimeout, fn);
    }

    public <R> R withObjSyncClient(String host, String addr, int port, long timeout, ObjectSyncClientFunction<R> fn) {
        var key = new ObjSyncStubKey(host, addr, port);
        var stub = _objSyncCache.computeIfAbsent(key, (k) -> {
            var channel = rpcChannelFactory.getSecureChannel(host, addr, port);
            return DhfsObjectSyncGrpcGrpc.newBlockingStub(channel)
                    .withMaxOutboundMessageSize(Integer.MAX_VALUE)
                    .withMaxInboundMessageSize(Integer.MAX_VALUE);
        });
        return fn.apply(stub.withDeadlineAfter(timeout, TimeUnit.SECONDS));
    }

    public void dropCache() {
        rpcChannelFactory.dropCache();
        _objSyncCache = new ConcurrentHashMap<>();
    }

    @FunctionalInterface
    public interface ObjectSyncClientFunction<R> {
        R apply(DhfsObjectSyncGrpcGrpc.DhfsObjectSyncGrpcBlockingStub client);
    }

    private record ObjSyncStubKey(String host, String address, int port) {
    }
}
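For context, a sketch of how a caller would use the shuffled multi-target overload, which tries replicas in random order until one answers; GetObjectRequest/GetObjectReply/getObject are assumed names from the sync proto, not verified against the repo:

// Hypothetical caller: ask whichever replica answers first for an object.
static GetObjectReply fetchFromAny(RpcClientFactory factory, java.util.Collection<java.util.UUID> targets,
                                   java.util.UUID selfUuid, String objName) {
    return factory.withObjSyncClient(targets, client ->
            client.getObject(GetObjectRequest.newBuilder()
                    .setSelfUuid(selfUuid.toString())
                    .setName(objName)
                    .build()));
}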
@@ -1,207 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
import com.usatiuk.dhfs.objects.persistence.JObjectDataP;
import com.usatiuk.dhfs.objects.repository.invalidation.InvalidationQueueService;
import com.usatiuk.dhfs.objects.repository.opsupport.OpObjectRegistry;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.inject.Instance;
import jakarta.inject.Inject;

import java.util.HashMap;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@ApplicationScoped
public class SyncHandler {
    @Inject
    JObjectManager jObjectManager;
    @Inject
    PeerManager remoteHostManager;
    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @Inject
    Instance<ConflictResolver> conflictResolvers;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    ProtoSerializer<JObjectDataP, JObjectData> dataProtoSerializer;
    @Inject
    OpObjectRegistry opObjectRegistry;
    @Inject
    JObjectTxManager jObjectTxManager;

    public void pushInitialResyncObj(UUID host) {
        Log.info("Doing initial object push for " + host);

        var objs = jObjectManager.findAll();

        for (var obj : objs) {
            Log.trace("IS: " + obj + " to " + host);
            invalidationQueueService.pushInvalidationToOne(host, obj);
        }
    }

    public void pushInitialResyncOp(UUID host) {
        Log.info("Doing initial op push for " + host);

        jObjectTxManager.executeTxAndFlush(
                () -> {
                    opObjectRegistry.pushBootstrapData(host);
                }
        );
    }

    public void handleOneUpdate(UUID from, ObjectHeader header) {
        AtomicReference<JObject<?>> foundExt = new AtomicReference<>();

        boolean conflict = jObjectTxManager.executeTx(() -> {
            JObject<?> found = jObjectManager.getOrPut(header.getName(), JObjectData.class, Optional.empty());
            foundExt.set(found);

            var receivedTotalVer = header.getChangelog().getEntriesList()
                    .stream().map(ObjectChangelogEntry::getVersion).reduce(0L, Long::sum);

            var receivedMap = new HashMap<UUID, Long>();
            for (var e : header.getChangelog().getEntriesList()) {
                receivedMap.put(UUID.fromString(e.getHost()), e.getVersion());
            }

            return found.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (md, data, bump, invalidate) -> {
                if (md.getRemoteCopies().getOrDefault(from, 0L) > receivedTotalVer) {
                    Log.error("Received older index update than was known for host: "
                            + from + " " + header.getName());
                    throw new OutdatedUpdateException();
                }

                String rcv = "";
                for (var e : header.getChangelog().getEntriesList()) {
                    rcv += e.getHost() + ": " + e.getVersion() + "; ";
                }
                String ours = "";
                for (var e : md.getChangelog().entrySet()) {
                    ours += e.getKey() + ": " + e.getValue() + "; ";
                }
                Log.trace("Handling update: " + header.getName() + " from " + from + "\n" + "ours: " + ours + " \n" + "received: " + rcv);

                boolean updatedRemoteVersion = false;

                var oldRemoteVer = md.getRemoteCopies().put(from, receivedTotalVer);
                if (oldRemoteVer == null || !oldRemoteVer.equals(receivedTotalVer)) updatedRemoteVersion = true;

                boolean hasLower = false;
                boolean hasHigher = false;
                for (var e : Stream.concat(md.getChangelog().keySet().stream(), receivedMap.keySet().stream()).collect(Collectors.toSet())) {
                    if (receivedMap.getOrDefault(e, 0L) < md.getChangelog().getOrDefault(e, 0L))
                        hasLower = true;
                    if (receivedMap.getOrDefault(e, 0L) > md.getChangelog().getOrDefault(e, 0L))
                        hasHigher = true;
                }

                if (hasLower && hasHigher) {
                    Log.info("Conflict on update (inconsistent version): " + header.getName() + " from " + from);
                    return true;
                }

                if (hasLower) {
                    Log.info("Received older index update than known: "
                            + from + " " + header.getName());
                    throw new OutdatedUpdateException();
                }

                if (hasHigher) {
                    invalidate.apply();
                    md.getChangelog().clear();
                    md.getChangelog().putAll(receivedMap);
                    md.getChangelog().putIfAbsent(persistentPeerDataService.getSelfUuid(), 0L);
                    if (header.hasPushedData())
                        found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData()));
                    return false;
                } else if (data == null && header.hasPushedData()) {
                    found.tryResolve(JObjectManager.ResolutionStrategy.LOCAL_ONLY);
                    if (found.getData() == null)
                        found.externalResolution(dataProtoSerializer.deserialize(header.getPushedData()));
                }

                assert Objects.equals(receivedTotalVer, md.getOurVersion());

                if (!updatedRemoteVersion)
                    Log.debug("No action on update: " + header.getName() + " from " + from);

                return false;
            });
        });

        // TODO: Is the lock gap here ok?
        if (conflict) {
            Log.info("Trying conflict resolution: " + header.getName() + " from " + from);
            var found = foundExt.get();

            JObjectData theirsData;
            ObjectHeader theirsHeader;
            if (header.hasPushedData()) {
                theirsHeader = header;
                theirsData = dataProtoSerializer.deserialize(header.getPushedData());
            } else {
                var got = remoteObjectServiceClient.getSpecificObject(from, header.getName());
                theirsData = dataProtoSerializer.deserialize(got.getRight());
                theirsHeader = got.getLeft();
            }

            jObjectTxManager.executeTx(() -> {
                var resolverClass = found.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> {
                    if (d == null)
                        throw new StatusRuntimeExceptionNoStacktrace(Status.UNAVAILABLE.withDescription("No local data when conflict " + header.getName()));
                    return d.getConflictResolver();
                });
                var resolver = conflictResolvers.select(resolverClass);
                resolver.get().resolve(from, theirsHeader, theirsData, found);
            });
            Log.info("Resolved conflict for " + from + " " + header.getName());
        }
    }

    public IndexUpdateReply handleRemoteUpdate(IndexUpdatePush request) {
        // TODO: Dedup
        try {
            handleOneUpdate(UUID.fromString(request.getSelfUuid()), request.getHeader());
        } catch (OutdatedUpdateException ignored) {
            Log.warn("Outdated update of " + request.getHeader().getName() + " from " + request.getSelfUuid());
            invalidationQueueService.pushInvalidationToOne(UUID.fromString(request.getSelfUuid()), request.getHeader().getName());
        } catch (Exception ex) {
            Log.info("Error when handling update from " + request.getSelfUuid() + " of " + request.getHeader().getName(), ex);
            throw ex;
        }

        return IndexUpdateReply.getDefaultInstance();
    }

    protected static class OutdatedUpdateException extends RuntimeException {
        OutdatedUpdateException() {
            super();
        }

        OutdatedUpdateException(String message) {
            super(message);
        }

        @Override
        public synchronized Throwable fillInStackTrace() {
            return this;
        }
    }
}
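The hasLower/hasHigher pass in handleOneUpdate is a version-vector comparison: entry-wise, the received changelog is strictly newer, strictly older, or concurrent with ours, and "concurrent" is what triggers conflict resolution. The same check reduced to a standalone sketch:

// Standalone sketch of the changelog comparison done in handleOneUpdate:
// compare two version maps entry-wise; CONCURRENT means a conflict.
import java.util.HashSet;
import java.util.Map;
import java.util.UUID;

enum Ordering {EQUAL, NEWER, OLDER, CONCURRENT}

class VersionVectors {
    static Ordering compare(Map<UUID, Long> ours, Map<UUID, Long> theirs) {
        boolean hasLower = false, hasHigher = false;
        var keys = new HashSet<>(ours.keySet());
        keys.addAll(theirs.keySet());
        for (var k : keys) {
            long o = ours.getOrDefault(k, 0L), t = theirs.getOrDefault(k, 0L);
            if (t < o) hasLower = true;
            if (t > o) hasHigher = true;
        }
        if (hasLower && hasHigher) return Ordering.CONCURRENT; // conflict
        if (hasHigher) return Ordering.NEWER;  // theirs strictly newer
        if (hasLower) return Ordering.OLDER;   // theirs strictly older
        return Ordering.EQUAL;
    }
}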
@@ -1,34 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;

@AllArgsConstructor
@NoArgsConstructor
public class TransientPeerState {
    @Getter
    @Setter
    private boolean _reachable = false;
    @Getter
    @Setter
    private String _addr;
    @Getter
    @Setter
    private int _port;
    @Getter
    @Setter
    private int _securePort;

    public TransientPeerState(boolean reachable) {
        _reachable = reachable;
    }

    public TransientPeerState(TransientPeerState source) {
        _reachable = source._reachable;
        _addr = source._addr;
        _port = source._port;
        _securePort = source._securePort;
    }
}
@@ -1,32 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TransientPeersState {
    private final TransientPeersStateData _data = new TransientPeersStateData();
    private final ReadWriteLock _lock = new ReentrantReadWriteLock();

    public <R> R runReadLocked(TransientPeersStaten<R> fn) {
        _lock.readLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.readLock().unlock();
        }
    }

    public <R> R runWriteLocked(TransientPeersStaten<R> fn) {
        _lock.writeLock().lock();
        try {
            return fn.apply(_data);
        } finally {
            _lock.writeLock().unlock();
        }
    }

    @FunctionalInterface
    public interface TransientPeersStaten<R> {
        R apply(TransientPeersStateData hostsData);
    }
}
@@ -1,22 +0,0 @@
package com.usatiuk.dhfs.objects.repository;

import lombok.Getter;

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

public class TransientPeersStateData {
    @Getter
    private final Map<UUID, TransientPeerState> _states = new LinkedHashMap<>();

    TransientPeerState get(UUID host) {
        return _states.computeIfAbsent(host, k -> new TransientPeerState());
    }

    TransientPeerState getCopy(UUID host) {
        return new TransientPeerState(get(host));
    }
}
@@ -1,122 +0,0 @@
package com.usatiuk.dhfs.objects.repository.autosync;

import com.usatiuk.dhfs.objects.jrepository.*;
import com.usatiuk.dhfs.objects.repository.peersync.PeerDirectory;
import com.usatiuk.dhfs.objects.repository.peersync.PersistentPeerInfo;
import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.Startup;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@ApplicationScoped
public class AutoSyncProcessor {
    private final HashSetDelayedBlockingQueue<String> _pending = new HashSetDelayedBlockingQueue<>(0);
    private final HashSetDelayedBlockingQueue<String> _retries = new HashSetDelayedBlockingQueue<>(10000); //FIXME:
    @Inject
    JObjectManager jObjectManager;
    @ConfigProperty(name = "dhfs.objects.autosync.threads")
    int autosyncThreads;
    @ConfigProperty(name = "dhfs.objects.autosync.download-all")
    boolean downloadAll;
    @Inject
    ExecutorService executorService;
    @Inject
    JObjectTxManager jObjectTxManager;
    private ExecutorService _autosyncExecutor;

    @Startup
    void init() {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("autosync-%d")
                .build();

        _autosyncExecutor = Executors.newFixedThreadPool(autosyncThreads, factory);
        for (int i = 0; i < autosyncThreads; i++) {
            _autosyncExecutor.submit(this::autosync);
        }

        if (downloadAll) {
            jObjectManager.registerMetaWriteListener(JObjectData.class, this::alwaysSaveCallback);
        } else {
            jObjectManager.registerMetaWriteListener(PersistentPeerInfo.class, this::alwaysSaveCallback);
            jObjectManager.registerMetaWriteListener(PeerDirectory.class, this::alwaysSaveCallback);
        }

        if (downloadAll)
            executorService.submit(() -> {
                for (var obj : jObjectManager.findAll()) {
                    var got = jObjectManager.get(obj);
                    if (got.isEmpty() || !got.get().getMeta().isHaveLocalCopy())
                        add(obj);
                }
            });
    }

    private void alwaysSaveCallback(JObject<?> obj) {
        obj.assertRwLock();
        if (obj.getMeta().isDeleted()) return;
        if (obj.getData() != null) return;
        if (obj.getMeta().isHaveLocalCopy()) return;

        add(obj.getMeta().getName());
    }

    void shutdown(@Observes @Priority(10) ShutdownEvent event) {
        _autosyncExecutor.shutdownNow();
    }

    public void add(String name) {
        _pending.add(name);
    }

    private void autosync() {
        try {
            while (!Thread.interrupted()) {
                String name = null;

                while (name == null) {
                    name = _pending.tryGet();
                    if (name == null)
                        name = _retries.tryGet();
                    if (name == null)
                        name = _pending.get(1000L); //FIXME:
                }

                try {
                    String finalName = name;
                    jObjectTxManager.executeTx(() -> {
                        jObjectManager.get(finalName).ifPresent(obj -> {
                            boolean ok = obj.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION, (m, d, i, v) -> {
                                if (m.isOnlyLocal()) return true; // FIXME:
                                if (m.isDeleted()) return true;
                                if (m.isDeletionCandidate()) return false;
                                if (obj.getMeta().isHaveLocalCopy()) return true;
                                return obj.tryResolve(JObjectManager.ResolutionStrategy.REMOTE);
                            });
                            if (!ok) {
                                Log.debug("Failed downloading object " + obj.getMeta().getName() + ", will retry.");
                                _retries.add(obj.getMeta().getName());
                            }
                        });
                    });
                } catch (DeletedObjectAccessException ignored) {
                } catch (Exception e) {
                    Log.debug("Failed downloading object " + name + ", will retry.", e);
                    _retries.add(name);
                }
            }
        } catch (InterruptedException ignored) {
        }
        Log.info("Autosync thread exiting");
    }
}
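AutoSyncProcessor leans on HashSetDelayedBlockingQueue: _pending (delay 0) hands names out immediately, _retries (10 s) only once they have aged, and duplicate adds collapse into one entry. A simplified model of those assumed semantics (set-backed, deduplicating, delay-gated); the real utility in dhfs.utils is more capable:

// Assumed semantics of HashSetDelayedBlockingQueue, not the real code:
// an element becomes visible only after sitting in the queue for delayMs,
// and re-adding an element already present is a no-op.
import java.util.LinkedHashMap;
import java.util.Map;

class DelayedDedupQueue<T> {
    private final long delayMs;
    private final Map<T, Long> entries = new LinkedHashMap<>(); // value = enqueue time

    DelayedDedupQueue(long delayMs) {
        this.delayMs = delayMs;
    }

    synchronized void add(T item) {
        entries.putIfAbsent(item, System.currentTimeMillis()); // dedup on re-add
    }

    // Non-blocking: return an element whose delay has elapsed, else null.
    synchronized T tryGet() {
        long now = System.currentTimeMillis();
        var it = entries.entrySet().iterator();
        while (it.hasNext()) {
            var e = it.next();
            if (now - e.getValue() >= delayMs) {
                it.remove();
                return e.getKey();
            }
        }
        return null;
    }
}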
@@ -1,17 +0,0 @@
package com.usatiuk.dhfs.objects.repository.invalidation;

import lombok.Getter;
import org.apache.commons.collections4.MultiValuedMap;
import org.apache.commons.collections4.multimap.HashSetValuedHashMap;

import java.io.Serial;
import java.io.Serializable;
import java.util.UUID;

public class DeferredInvalidationQueueData implements Serializable {
    @Serial
    private static final long serialVersionUID = 1L;

    @Getter
    private final MultiValuedMap<UUID, String> _deferredInvalidations = new HashSetValuedHashMap<>();
}
@@ -1,92 +0,0 @@
package com.usatiuk.dhfs.objects.repository.invalidation;

import com.usatiuk.dhfs.utils.SerializationHelper;
import com.usatiuk.dhfs.objects.repository.PeerManager;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import io.quarkus.scheduler.Scheduled;
import io.smallrye.common.annotation.Blocking;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.SerializationUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.UUID;

import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

@ApplicationScoped
public class DeferredInvalidationQueueService {
    private static final String dataFileName = "invqueue";
    @Inject
    PeerManager remoteHostManager;
    @Inject
    InvalidationQueueService invalidationQueueService;
    @ConfigProperty(name = "dhfs.objects.root")
    String dataRoot;
    // FIXME: DB when?
    private DeferredInvalidationQueueData _persistentData = new DeferredInvalidationQueueData();

    void init(@Observes @Priority(290) StartupEvent event) throws IOException {
        Paths.get(dataRoot).toFile().mkdirs();
        Log.info("Initializing with root " + dataRoot);
        if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists()) {
            Log.info("Reading invalidation queue");
            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName)));
        } else if (Paths.get(dataRoot).resolve(dataFileName + ".bak").toFile().exists()) {
            Log.warn("Reading invalidation queue from backup");
            _persistentData = SerializationHelper.deserialize(Files.readAllBytes(Paths.get(dataRoot).resolve(dataFileName + ".bak")));
        }
        remoteHostManager.registerConnectEventListener(this::returnForHost);
    }

    void shutdown(@Observes @Priority(300) ShutdownEvent event) throws IOException {
        Log.info("Saving deferred invalidations");
        writeData();
        Log.info("Saved deferred invalidations");
    }

    private void writeData() {
        try {
            if (Paths.get(dataRoot).resolve(dataFileName).toFile().exists())
                Files.move(Paths.get(dataRoot).resolve(dataFileName), Paths.get(dataRoot).resolve(dataFileName + ".bak"), REPLACE_EXISTING);
            Files.write(Paths.get(dataRoot).resolve(dataFileName), SerializationUtils.serialize(_persistentData));
        } catch (IOException iex) {
            Log.error("Error writing deferred invalidations data", iex);
            throw new RuntimeException(iex);
        }
    }

    // FIXME:
    @Scheduled(every = "15s", concurrentExecution = Scheduled.ConcurrentExecution.SKIP)
    @Blocking
    void periodicReturn() {
        for (var reachable : remoteHostManager.getAvailableHosts())
            returnForHost(reachable);
    }

    void returnForHost(UUID host) {
        synchronized (this) {
            var col = _persistentData.getDeferredInvalidations().get(host);
            for (var s : col) {
                Log.trace("Un-deferred invalidation to " + host + " of " + s);
                invalidationQueueService.pushDeferredInvalidations(host, s);
            }
            col.clear();
        }
    }

    void defer(UUID host, String object) {
        synchronized (this) {
            Log.trace("Deferred invalidation to " + host + " of " + object);
            _persistentData.getDeferredInvalidations().put(host, object);
        }
    }
}
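(The backup branch in init() originally re-read the main file rather than the ".bak" file; that is corrected above.) The writeData()/init() pair is a write-with-backup scheme: move the previous good copy to ".bak" before writing, and fall back to it on startup, so a crash mid-write still leaves one readable snapshot. The same scheme in isolation, as a generic sketch:

// Generic sketch of the write-with-backup scheme used by writeData()/init().
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

class BackupWriter {
    static void write(Path file, byte[] data) throws IOException {
        Path bak = file.resolveSibling(file.getFileName() + ".bak");
        if (Files.exists(file))
            Files.move(file, bak, REPLACE_EXISTING); // previous snapshot survives
        Files.write(file, data);                     // may be torn by a crash
    }

    static byte[] read(Path file) throws IOException {
        Path bak = file.resolveSibling(file.getFileName() + ".bak");
        if (Files.exists(file)) return Files.readAllBytes(file);
        return Files.readAllBytes(bak); // fall back to the last good snapshot
    }
}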
@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.objects.repository.invalidation;

public class InvalidationQueue {
}
@@ -1,180 +0,0 @@
package com.usatiuk.dhfs.objects.repository.invalidation;

import com.usatiuk.dhfs.objects.jrepository.DeletedObjectAccessException;
import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.repository.PeerManager;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import io.vertx.core.impl.ConcurrentHashSet;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

@ApplicationScoped
public class InvalidationQueueService {
    private final HashSetDelayedBlockingQueue<Pair<UUID, String>> _queue;
    private final AtomicReference<ConcurrentHashSet<String>> _toAllQueue = new AtomicReference<>(new ConcurrentHashSet<>());
    @Inject
    PeerManager remoteHostManager;
    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;
    @Inject
    JObjectManager jObjectManager;
    @Inject
    PersistentPeerDataService persistentPeerDataService;
    @Inject
    DeferredInvalidationQueueService deferredInvalidationQueueService;
    @ConfigProperty(name = "dhfs.objects.invalidation.threads")
    int threads;
    private ExecutorService _executor;
    private volatile boolean _shutdown = false;

    public InvalidationQueueService(@ConfigProperty(name = "dhfs.objects.invalidation.delay") int delay) {
        _queue = new HashSetDelayedBlockingQueue<>(delay);
    }

    void init(@Observes @Priority(300) StartupEvent event) throws InterruptedException {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("invalidation-%d")
                .build();

        _executor = Executors.newFixedThreadPool(threads, factory);

        for (int i = 0; i < threads; i++) {
            _executor.submit(this::sender);
        }
    }

    void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException {
        _shutdown = true;
        _executor.shutdownNow();
        if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) {
            Log.error("Failed to shut down invalidation sender thread");
        }
        var data = _queue.close();
        Log.info("Will defer " + data.size() + " invalidations on shutdown");
        for (var e : data)
            deferredInvalidationQueueService.defer(e.getLeft(), e.getRight());
    }

    private void sender() {
        while (!_shutdown) {
            try {
                try {
                    if (!_queue.hasImmediate()) {
                        ConcurrentHashSet<String> toAllQueue;

                        while (true) {
                            toAllQueue = _toAllQueue.get();
                            if (toAllQueue != null) {
                                if (_toAllQueue.compareAndSet(toAllQueue, null))
                                    break;
                            } else {
                                break;
                            }
                        }

                        if (toAllQueue != null) {
                            var hostInfo = remoteHostManager.getHostStateSnapshot();
                            for (var o : toAllQueue) {
                                for (var h : hostInfo.available())
                                    _queue.add(Pair.of(h, o));
                                for (var u : hostInfo.unavailable())
                                    deferredInvalidationQueueService.defer(u, o);
                            }
                        }
                    }

                    var data = _queue.getAllWait(100, _queue.getDelay()); // TODO: config?
                    if (data.isEmpty()) continue;
                    String stats = "Sent invalidation: ";
                    long success = 0;

                    for (var e : data) {
                        if (!persistentPeerDataService.existsHost(e.getLeft())) continue;

                        if (!remoteHostManager.isReachable(e.getLeft())) {
                            deferredInvalidationQueueService.defer(e.getLeft(), e.getRight());
                            continue;
                        }

                        try {
                            jObjectManager.get(e.getRight()).ifPresent(obj -> {
                                remoteObjectServiceClient.notifyUpdate(obj, e.getLeft());
                            });
                            success++;
                        } catch (DeletedObjectAccessException ignored) {
                        } catch (Exception ex) {
                            Log.info("Failed to send invalidation to " + e.getLeft() + ", will retry", ex);
                            pushInvalidationToOne(e.getLeft(), e.getRight());
                        }
                        if (_shutdown) {
                            Log.info("Invalidation sender exiting");
                            break;
                        }
                    }

                    stats += success + "/" + data.size() + " ";
                    Log.info(stats);
                } catch (InterruptedException ie) {
                    throw ie;
                } catch (Exception e) {
                    Log.error("Exception in invalidation sender thread: ", e);
                }
            } catch (InterruptedException ignored) {
            }
        }
        Log.info("Invalidation sender exiting");
    }

    public void pushInvalidationToAll(JObject<?> obj) {
        if (obj.getMeta().isOnlyLocal()) return;
        while (true) {
            var queue = _toAllQueue.get();
            if (queue == null) {
                var nq = new ConcurrentHashSet<String>();
                if (!_toAllQueue.compareAndSet(null, nq)) continue;
                queue = nq;
            }

            queue.add(obj.getMeta().getName());

            if (_toAllQueue.get() == queue) break;
        }
    }

    public void pushInvalidationToOne(UUID host, JObject<?> obj) {
        if (obj.getMeta().isOnlyLocal()) return;
        if (remoteHostManager.isReachable(host))
            _queue.add(Pair.of(host, obj.getMeta().getName()));
        else
            deferredInvalidationQueueService.defer(host, obj.getMeta().getName());
    }

    public void pushInvalidationToAll(String name) {
        pushInvalidationToAll(jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found")));
    }

    public void pushInvalidationToOne(UUID host, String name) {
        pushInvalidationToOne(host, jObjectManager.get(name).orElseThrow(() -> new IllegalArgumentException("Object " + name + " not found")));
    }

    protected void pushDeferredInvalidations(UUID host, String name) {
        _queue.add(Pair.of(host, name));
    }
}
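The _toAllQueue handling above is a lock-free batch handoff: producers add into the current set, the sender swaps the whole set out with compareAndSet, and a producer retries when its set was drained under it, so an item is never lost in an already-drained batch. The pattern in isolation:

// Sketch of the lock-free batch handoff used for the "invalidate on all
// hosts" queue.
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

class BatchHandoff<T> {
    private final AtomicReference<Set<T>> batch = new AtomicReference<>(ConcurrentHashMap.newKeySet());

    void add(T item) {
        while (true) {
            var cur = batch.get();
            if (cur == null) {
                var fresh = ConcurrentHashMap.<T>newKeySet();
                if (!batch.compareAndSet(null, fresh)) continue;
                cur = fresh;
            }
            cur.add(item);
            // If the consumer drained the set meanwhile, retry so the item
            // is added to a batch that will actually be processed.
            if (batch.get() == cur) break;
        }
    }

    Set<T> drain() {
        while (true) {
            var cur = batch.get();
            if (cur == null) return Set.of();
            if (batch.compareAndSet(cur, null)) return cur;
        }
    }
}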
@@ -1,11 +0,0 @@
package com.usatiuk.dhfs.objects.repository.opsupport;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.dhfs.objects.repository.OpPushPayload;

import java.util.Collection;

@ProtoMirror(OpPushPayload.class)
public interface Op {
    Collection<String> getEscapedRefs();
}
@@ -1,22 +0,0 @@
package com.usatiuk.dhfs.objects.repository.opsupport;

import java.util.List;
import java.util.UUID;

public interface OpObject {
    String getId();

    boolean hasPendingOpsForHost(UUID host);

    List<Op> getPendingOpsForHost(UUID host, int limit);

    void commitOpForHost(UUID host, Op op);

    void pushBootstrap(UUID host);

    boolean acceptExternalOp(UUID from, Op op);

    Op getPeriodicPushOp();

    void addToTx();
}
@@ -1,47 +0,0 @@
package com.usatiuk.dhfs.objects.repository.opsupport;

import com.usatiuk.dhfs.objects.repository.PeerManager;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

@ApplicationScoped
public class OpObjectRegistry {
    private final ConcurrentHashMap<String, OpObject> _objects = new ConcurrentHashMap<>();
    @Inject
    OpSender opSender;
    @Inject
    PeerManager remoteHostManager;

    public void registerObject(OpObject obj) {
        _objects.put(obj.getId(), obj);
        remoteHostManager.registerConnectEventListener(host -> {
            opSender.push(obj);
        });
    }

    public void acceptExternalOps(String objId, UUID from, List<Op> ops) {
        var got = _objects.get(objId);
        if (got == null)
            throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Queue with id " + objId + " not registered"));
        got.addToTx();
        boolean push = false;
        for (Op op : ops)
            push |= got.acceptExternalOp(from, op);
        if (push)
            opSender.push(got);
    }

    public void pushBootstrapData(UUID host) {
        for (var o : _objects.values()) {
            // FIXME: Split transactions for objects?
            o.addToTx();
            o.pushBootstrap(host);
        }
    }
}
@@ -1,109 +0,0 @@
package com.usatiuk.dhfs.objects.repository.opsupport;

import com.usatiuk.dhfs.objects.jrepository.JObjectTxManager;
import com.usatiuk.dhfs.objects.repository.PeerManager;
import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
import com.usatiuk.dhfs.utils.HashSetDelayedBlockingQueue;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.Startup;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

@ApplicationScoped
public class OpSender {
    private static final int _threads = 1;
    private final HashSetDelayedBlockingQueue<OpObject> _queue = new HashSetDelayedBlockingQueue<>(0); // FIXME:
    @Inject
    PeerManager remoteHostManager;
    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;
    @Inject
    JObjectTxManager jObjectTxManager;
    @ConfigProperty(name = "dhfs.objects.opsender.batch-size")
    int batchSize;
    private ExecutorService _executor;
    private volatile boolean _shutdown = false;

    @Startup
    void init() {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("opsender-%d")
                .build();

        _executor = Executors.newFixedThreadPool(_threads, factory);

        for (int i = 0; i < _threads; i++) {
            _executor.submit(this::sender);
        }
    }

    void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException {
        _shutdown = true;
        _executor.shutdownNow();
        if (!_executor.awaitTermination(30, TimeUnit.SECONDS)) {
            Log.error("Failed to shut down op sender thread");
        }
    }

    private void sender() {
        while (!_shutdown) {
            try {
                var got = _queue.get();
                for (var h : remoteHostManager.getAvailableHosts()) {
                    sendForHost(got, h);
                }
            } catch (InterruptedException ignored) {
            } catch (Throwable ex) {
                Log.error("In op sender: ", ex);
            }
        }
    }

    void sendForHost(OpObject obj, UUID host) {
        // Must be peeked before getPendingOpsForHost
        var periodicPushOp = obj.getPeriodicPushOp();

        if (!obj.hasPendingOpsForHost(host)) {
            if (periodicPushOp == null) return;
            try {
                remoteObjectServiceClient.pushOps(List.of(periodicPushOp), obj.getId(), host);
                Log.debug("Sent periodic op update to " + host + " of " + obj.getId());
            } catch (Throwable e) {
                Log.warn("Error pushing periodic op for " + host + " of " + obj.getId(), e);
            }
            return;
        }

        while (obj.hasPendingOpsForHost(host)) {
            List<Op> collected = obj.getPendingOpsForHost(host, batchSize);
            try {
                // The peer should finish the call only if it had persisted everything
                remoteObjectServiceClient.pushOps(collected, obj.getId(), host);
                // If we crash here, it's ok, the peer will just skip these ops the next time we send them
                jObjectTxManager.executeTx(() -> {
                    obj.addToTx();
                    for (var op : collected)
                        obj.commitOpForHost(host, op);
                });
                Log.info("Sent " + collected.size() + " op updates to " + host + " of " + obj.getId());
            } catch (Throwable e) {
                Log.warn("Error sending op to " + host, e);
            }
        }
    }

    public void push(OpObject queue) {
        _queue.readd(queue);
    }
}
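The comments in sendForHost spell out the delivery contract: the peer acks only after persisting, the sender commits locally only after the ack, so every failure mode degrades to a duplicate send that the receiver deduplicates. Roughly, as a sketch with hypothetical OpLog names (not the actual DHFS interfaces):

// At-least-once send loop; OpLog/peekBatch/commit are hypothetical names.
import java.util.List;
import java.util.function.Consumer;

interface OpLog<T> {
    List<T> peekBatch(int max); // pending ops, oldest first; empty when done
    void commit(List<T> sent);  // drop ops the peer has durably persisted

    static <T> void drainTo(OpLog<T> log, Consumer<List<T>> sendAndAwaitAck, int batch) {
        List<T> collected;
        while (!(collected = log.peekBatch(batch)).isEmpty()) {
            sendAndAwaitAck.accept(collected); // throws if the peer did not persist
            log.commit(collected);             // safe: the peer has them durably
        }
    }
}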
@@ -1,117 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peerdiscovery;

import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.Startup;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;

import java.net.*;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true")
public class LocalPeerDiscoveryBroadcaster {

    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @ConfigProperty(name = "quarkus.http.port")
    Integer ourPort;

    @ConfigProperty(name = "quarkus.http.ssl-port")
    Integer ourSecurePort;

    @ConfigProperty(name = "dhfs.objects.peerdiscovery.port")
    Integer broadcastPort;

    @ConfigProperty(name = "dhfs.objects.peerdiscovery.interval")
    Integer broadcastInterval;

    private Thread _broadcasterThread;

    private DatagramSocket _socket;

    @Startup
    void init() throws SocketException {
        _socket = new DatagramSocket();
        _socket.setBroadcast(true);

        _broadcasterThread = new Thread(this::broadcast);
        _broadcasterThread.setName("LocalPeerDiscoveryBroadcaster");
        _broadcasterThread.start();
    }

    void shutdown(@Observes @Priority(10) ShutdownEvent event) {
        _socket.close();
        _broadcasterThread.interrupt();
        while (_broadcasterThread.isAlive()) {
            try {
                _broadcasterThread.join();
            } catch (InterruptedException ignored) {
            }
        }
    }

    private void broadcast() {
        try {
            while (!Thread.interrupted() && !_socket.isClosed()) {
                Thread.sleep(broadcastInterval);

                try {
                    var sendData = PeerDiscoveryInfo.newBuilder()
                            .setUuid(persistentPeerDataService.getSelfUuid().toString())
                            .setPort(ourPort)
                            .setSecurePort(ourSecurePort)
                            .build();

                    var sendBytes = sendData.toByteArray();

                    DatagramPacket sendPacket
                            = new DatagramPacket(sendBytes, sendBytes.length,
                            InetAddress.getByName("255.255.255.255"), broadcastPort);

                    _socket.send(sendPacket);

                    var interfaces = NetworkInterface.getNetworkInterfaces();
                    while (interfaces.hasMoreElements()) {
                        NetworkInterface networkInterface = interfaces.nextElement();

                        try {
                            if (networkInterface.isLoopback() || !networkInterface.isUp()) {
                                continue;
                            }
                        } catch (Exception e) {
                            continue;
                        }

                        for (InterfaceAddress interfaceAddress : networkInterface.getInterfaceAddresses()) {
                            InetAddress broadcast = interfaceAddress.getBroadcast();
                            if (broadcast == null) {
                                continue;
                            }

                            try {
                                sendPacket = new DatagramPacket(sendBytes, sendBytes.length, broadcast, broadcastPort);
                                _socket.send(sendPacket);
                            } catch (Exception ignored) {
                                continue;
                            }

                            // Log.trace(getClass().getName() + "Broadcast sent to: " + broadcast.getHostAddress()
                            //         + ", at: " + networkInterface.getDisplayName());
                        }
                    }

                } catch (Exception ignored) {
                }
            }
        } catch (InterruptedException ignored) {
        }
        Log.info("PeerDiscoveryServer stopped");
    }
}
@@ -1,73 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peerdiscovery;

import com.google.protobuf.InvalidProtocolBufferException;
import com.usatiuk.dhfs.objects.repository.PeerManager;
import io.quarkus.arc.properties.IfBuildProperty;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.Startup;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;

import java.net.*;
import java.nio.ByteBuffer;
import java.util.UUID;

@ApplicationScoped
@IfBuildProperty(name = "dhfs.local-discovery", stringValue = "true")
public class LocalPeerDiscoveryClient {

    @Inject
    PeerManager remoteHostManager;

    private Thread _clientThread;

    private DatagramSocket _socket;

    @Startup
    void init() throws SocketException, UnknownHostException {
        _socket = new DatagramSocket(42069, InetAddress.getByName("0.0.0.0"));
        _socket.setBroadcast(true);

        _clientThread = new Thread(this::client);
        _clientThread.setName("LocalPeerDiscoveryClient");
        _clientThread.start();
    }

    void shutdown(@Observes @Priority(10) ShutdownEvent event) throws InterruptedException {
        _socket.close();
        _clientThread.interrupt();
        while (_clientThread.isAlive()) {
            try {
                _clientThread.join();
            } catch (InterruptedException ignored) {
            }
        }
    }

    private void client() {
        while (!Thread.interrupted() && !_socket.isClosed()) {
            try {
                byte[] buf = new byte[10000];
                DatagramPacket packet = new DatagramPacket(buf, buf.length);
                _socket.receive(packet);

                try {
                    var got = PeerDiscoveryInfo.parseFrom(ByteBuffer.wrap(buf, 0, packet.getLength()));

                    remoteHostManager.notifyAddr(UUID.fromString(got.getUuid()), packet.getAddress().getHostAddress(), got.getPort(), got.getSecurePort());

                } catch (InvalidProtocolBufferException e) {
                    continue;
                }
            } catch (Exception ex) {
                Log.error(ex);
            }
        }
        Log.info("PeerDiscoveryClient stopped");
    }

}
@@ -1,46 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.PushResolution;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import lombok.Getter;

import java.io.Serial;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.UUID;

@PushResolution
public class PeerDirectory extends JObjectData {
    public static final String PeerDirectoryObjName = "peer_directory";
    @Serial
    private static final long serialVersionUID = 1;
    @Getter
    private final Set<UUID> _peers = new LinkedHashSet<>();

    @Override
    public String getName() {
        return PeerDirectoryObjName;
    }

    @Override
    public Class<? extends ConflictResolver> getConflictResolver() {
        return PeerDirectoryConflictResolver.class;
    }

    @Override
    public Class<? extends JObjectData> getRefType() {
        return PersistentPeerInfo.class;
    }

    @Override
    public Collection<String> extractRefs() {
        return _peers.stream().map(PersistentPeerInfo::getNameFromUuid).toList();
    }

    @Override
    public int estimateSize() {
        return _peers.size() * 32;
    }
}
@@ -1,73 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

import com.usatiuk.dhfs.objects.jrepository.JObject;
import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.JObjectManager;
import com.usatiuk.dhfs.objects.repository.ConflictResolver;
import com.usatiuk.dhfs.objects.repository.ObjectHeader;
import com.usatiuk.dhfs.objects.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.objects.repository.RemoteObjectServiceClient;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.NotImplementedException;

import java.util.*;

@ApplicationScoped
public class PeerDirectoryConflictResolver implements ConflictResolver {
    @Inject
    PersistentPeerDataService persistentPeerDataService;

    @Inject
    RemoteObjectServiceClient remoteObjectServiceClient;

    @Inject
    JObjectManager jObjectManager;

    @Override
    public void resolve(UUID conflictHost, ObjectHeader theirsHeader, JObjectData theirsData, JObject<?> ours) {
        // Check the type before casting, so a mismatch is logged instead of
        // surfacing as a bare ClassCastException
        if (!theirsData.getClass().equals(PeerDirectory.class)) {
            Log.error("Object type mismatch!");
            throw new NotImplementedException();
        }
        var theirsDir = (PeerDirectory) theirsData;

        ours.runWriteLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, oursDirU, bump, invalidate) -> {
            if (oursDirU == null)
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
            if (!(oursDirU instanceof PeerDirectory oursPD))
                throw new NotImplementedException("Type conflict for " + ours.getMeta().getName() + ", directory was expected");

            LinkedHashSet<UUID> mergedChildren = new LinkedHashSet<>(oursPD.getPeers());
            mergedChildren.addAll(theirsDir.getPeers());
            Map<UUID, Long> newChangelog = new LinkedHashMap<>(m.getChangelog());

            for (var entry : theirsHeader.getChangelog().getEntriesList()) {
                newChangelog.merge(UUID.fromString(entry.getHost()), entry.getVersion(), Long::max);
            }

            boolean wasChanged = oursPD.getPeers().size() != mergedChildren.size();

            if (m.getBestVersion() > newChangelog.values().stream().reduce(0L, Long::sum))
                throw new StatusRuntimeException(Status.ABORTED.withDescription("Race when conflict resolving"));

            if (wasChanged) {
                newChangelog.merge(persistentPeerDataService.getSelfUuid(), 1L, Long::sum);

                for (var child : mergedChildren) {
                    if (!oursPD.getPeers().contains(child)) {
                        jObjectManager.getOrPut(PersistentPeerInfo.getNameFromUuid(child), PersistentPeerInfo.class, Optional.of(oursPD.getName()));
                    }
                }

                oursPD.getPeers().addAll(mergedChildren);
            }

            m.setChangelog(newChangelog);
            return null;
        });
    }
}
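The resolver's merge is effectively a grow-only set union plus a per-host max over the two changelogs, with a version bump only when the union actually changed something. Its essentials, as a standalone sketch:

// The merge rule from the resolver, reduced to its essentials: a grow-only
// set union plus a per-host max over the two changelogs.
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

class PeerDirectoryMerge {
    static Set<UUID> mergePeers(Set<UUID> ours, Set<UUID> theirs) {
        var merged = new LinkedHashSet<>(ours);
        merged.addAll(theirs); // grow-only: peers are never un-merged here
        return merged;
    }

    static Map<UUID, Long> mergeChangelog(Map<UUID, Long> ours, Map<UUID, Long> theirs) {
        var merged = new LinkedHashMap<>(ours);
        theirs.forEach((host, ver) -> merged.merge(host, ver, Long::max));
        return merged;
    }
}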
@@ -1,27 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

import com.usatiuk.dhfs.objects.jrepository.JObjectData;
import com.usatiuk.dhfs.objects.jrepository.OnlyLocal;
import lombok.Getter;

import java.util.HashSet;
import java.util.UUID;

@OnlyLocal
public class PeerDirectoryLocal extends JObjectData {
    public static final String PeerDirectoryLocalObjName = "peer_directory_local";
    @Getter
    private final HashSet<UUID> _initialOpSyncDone = new HashSet<>();
    @Getter
    private final HashSet<UUID> _initialObjSyncDone = new HashSet<>();

    @Override
    public String getName() {
        return PeerDirectoryLocalObjName;
    }

    @Override
    public int estimateSize() {
        return 1024; //FIXME:
    }
}
@@ -1,26 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.PeerDirectoryLocalP;
import jakarta.inject.Singleton;

import java.util.Objects;
import java.util.UUID;

@Singleton
public class PeerDirectoryLocalSerializer implements ProtoSerializer<PeerDirectoryLocalP, PeerDirectoryLocal> {
    @Override
    public PeerDirectoryLocal deserialize(PeerDirectoryLocalP message) {
        var ret = new PeerDirectoryLocal();
        ret.getInitialOpSyncDone().addAll(message.getInitialOpSyncDonePeersList().stream().map(UUID::fromString).toList());
        ret.getInitialObjSyncDone().addAll(message.getInitialObjSyncDonePeersList().stream().map(UUID::fromString).toList());
        return ret;
    }

    @Override
    public PeerDirectoryLocalP serialize(PeerDirectoryLocal object) {
        return PeerDirectoryLocalP.newBuilder()
                .addAllInitialObjSyncDonePeers(() -> object.getInitialObjSyncDone().stream().map(Objects::toString).iterator())
                .addAllInitialOpSyncDonePeers(() -> object.getInitialOpSyncDone().stream().map(Objects::toString).iterator()).build();
    }
}
@@ -1,23 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import com.usatiuk.dhfs.objects.persistence.PeerDirectoryP;
import jakarta.inject.Singleton;

import java.util.Objects;
import java.util.UUID;

@Singleton
public class PeerDirectorySerializer implements ProtoSerializer<PeerDirectoryP, PeerDirectory> {
    @Override
    public PeerDirectory deserialize(PeerDirectoryP message) {
        var ret = new PeerDirectory();
        message.getPeersList().stream().map(UUID::fromString).forEach(ret.getPeers()::add);
        return ret;
    }

    @Override
    public PeerDirectoryP serialize(PeerDirectory object) {
        return PeerDirectoryP.newBuilder().addAllPeers(() -> object.getPeers().stream().map(Objects::toString).iterator()).build();
    }
}
@@ -1,4 +0,0 @@
package com.usatiuk.dhfs.objects.repository.peersync;

public record PeerInfo(String selfUuid, String cert) {
}
Some files were not shown because too many files have changed in this diff