revert crapfs

This commit is contained in:
2024-12-18 13:30:32 +01:00
parent e5949b7507
commit 14ba4b8e2e
14 changed files with 0 additions and 1234 deletions

View File

@@ -1,208 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>crapfs</artifactId>
<version>1.0.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>1.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
<version>1.78.1</version>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
<version>1.78.1</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1 +0,0 @@
lombok.accessors.prefix += _

View File

@@ -1,97 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/crapfs-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a containers restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
# Red Hat UBI8 base image with OpenJDK 21 and the run-java.sh launcher.
FROM registry.access.redhat.com/ubi8/openjdk-21:1.20
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
# 185 is the default non-root user of the UBI OpenJDK images.
USER 185
# Bind to all interfaces inside the container and use the JBoss log manager.
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -1,93 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/crapfs-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 by default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a containers restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
#   "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
# Red Hat UBI8 base image with OpenJDK 21 and the run-java.sh launcher.
FROM registry.access.redhat.com/ubi8/openjdk-21:1.20
ENV LANGUAGE='en_US:en'
# Legacy-jar layout: dependency jars in lib/, single runner jar renamed to quarkus-run.jar.
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
# 185 is the default non-root user of the UBI OpenJDK images.
USER 185
# Bind to all interfaces inside the container and use the JBoss log manager.
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -1,27 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/crapfs .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs
#
###
# Minimal UBI8 base; no JVM needed for the native executable.
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10
WORKDIR /work/
# Make /work writable for the non-root runtime user (1001) and its group.
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
# Bind to all interfaces inside the container.
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,30 +0,0 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/crapfs .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/crapfs
#
###
# Micro base image tuned for Quarkus native executables (smaller than ubi-minimal).
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
# Make /work writable for the non-root runtime user (1001) and its group.
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
# Bind to all interfaces inside the container.
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,21 +0,0 @@
package org.acme;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
import io.quarkus.runtime.annotations.QuarkusMain;
/**
 * JVM entry point for the crapfs server.
 * Delegates the whole application lifecycle to Quarkus.
 */
@QuarkusMain
public class Main {
/** Quarkus application body: all real work happens in CDI beans started via lifecycle events; this simply blocks until shutdown. */
public static class CrapfsServerApp implements QuarkusApplication {
@Override
public int run(String... args) throws Exception {
// Park the main thread until Quarkus is asked to exit.
Quarkus.waitForExit();
return 0;
}
}
public static void main(String... args) {
Quarkus.run(CrapfsServerApp.class, args);
}
}

View File

@@ -1,9 +0,0 @@
package org.acme.files.objects;
import com.usatiuk.objects.common.runtime.JData;
/**
 * Persistent object holding the raw bytes of one file chunk.
 * Chunks are created by DhfsFileService and referenced from File.getChunks()
 * by their JObjectKey.
 */
public interface ChunkData extends JData {
// Raw chunk payload. NOTE(review): returns the stored array directly here;
// whether callers may mutate it depends on the JData implementation - confirm.
byte[] getData();
void setData(byte[] data);
}

View File

@@ -1,16 +0,0 @@
package org.acme.files.objects;
import com.usatiuk.objects.common.runtime.JData;
import com.usatiuk.objects.common.runtime.JObjectKey;
import java.util.NavigableMap;
/**
 * Persistent file object: an ordered map from byte offset within the file to
 * the key of the ChunkData object holding the bytes starting at that offset,
 * plus a cached total size maintained by DhfsFileService.updateFileSize().
 */
public interface File extends JData {
// Keys are the starting byte offset of each chunk; values reference ChunkData objects.
NavigableMap<Long, JObjectKey> getChunks();
void setChunks(NavigableMap<Long, JObjectKey> chunk);
// Cached file length in bytes (offset of last chunk + its data length).
long getSize();
void setSize(long size);
}

View File

@@ -1,444 +0,0 @@
package org.acme.files.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.objects.TransactionManager;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.objects.alloc.runtime.ObjectAllocator;
import com.usatiuk.objects.common.runtime.JObjectKey;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import org.acme.files.objects.ChunkData;
import org.acme.files.objects.File;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.util.*;
/**
 * Flat (single-directory) file service storing file contents as a sequence of
 * chunks (ChunkData objects) indexed by starting offset in File.getChunks().
 * All operations run inside a transaction via TransactionManager.run().
 */
@ApplicationScoped
public class DhfsFileService {
// Preferred chunk size in bytes; <= 0 disables chunk splitting/merging.
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
// Writes smaller than targetChunkSize * this fraction trigger neighbor merging.
@ConfigProperty(name = "dhfs.files.write_merge_threshold")
float writeMergeThreshold;
// Neighbor chunks at least targetChunkSize * this fraction large are never absorbed.
@ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
float writeMergeMaxChunkToTake;
// Merging stops once the combined buffer would exceed targetChunkSize * this fraction.
@ConfigProperty(name = "dhfs.files.write_merge_limit")
float writeMergeLimit;
// Tail remainder below targetChunkSize * this fraction is emitted as one final chunk.
@ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
float writeLastChunkLimit;
// NOTE(review): injected but unused in this class - confirm it is needed.
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
Transaction curTx;
@Inject
TransactionManager txm;
@Inject
ObjectAllocator alloc;
// NOTE(review): in-memory counter used to mint chunk keys; not persisted and
// not synchronized, so uniqueness across restarts/threads is not guaranteed - confirm.
long chunkCounter = 0;
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
}
/** Returns the path itself if a File object with that key exists, else empty. */
public Optional<String> open(String path) {
return txm.run(() -> {
if (curTx.getObject(File.class, new JObjectKey(path)).orElse(null) != null) {
return Optional.of(path);
}
return Optional.empty();
});
}
/**
 * Creates an empty File keyed by {@code path}. Only a flat namespace is
 * supported, so slashes are rejected.
 */
public Optional<String> create(String path) {
if (path.contains("/")) {
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Path should not contain slashes"));
}
return txm.run(() -> {
var file = alloc.create(File.class, new JObjectKey(path));
file.setChunks(new TreeMap<>());
curTx.putObject(file);
return Optional.of(path);
});
}
/** Allocates and stores a new ChunkData holding {@code bytes}; returns its key. Must run inside a transaction. */
private JObjectKey createChunk(ByteString bytes) {
var cd = alloc.create(ChunkData.class, new JObjectKey("chunk-" + chunkCounter++));
cd.setData(bytes.toByteArray());
curTx.putObject(cd);
return cd.getKey();
}
/** Loads a chunk's bytes as a (zero-copy wrapped) ByteString; NOT_FOUND if the key is dangling. */
private ByteString readChunk(JObjectKey uuid) {
var chunk = curTx.getObject(ChunkData.class, uuid);
if (chunk.isEmpty()) {
Log.error("Chunk not found when trying to read: " + uuid);
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("Chunk not found"));
}
return UnsafeByteOperations.unsafeWrap(chunk.get().getData());
}
// NOTE(review): directory listing is hard-coded to these two names; files
// created under other names will exist but never be listed.
private static final List<String> fileNames = List.of("file1", "file2");
/** Lists the root directory (the only directory supported): returns the subset of the hard-coded names that exist. */
public List<String> readdir(String path) {
if (!path.equals("")) {
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Only root directory is supported"));
}
return txm.run(() -> {
var ret = new ArrayList<String>();
for (String fileName : fileNames) {
var got = curTx.getObject(File.class, new JObjectKey(fileName));
if (got.isPresent()) {
ret.add(fileName);
}
}
return ret;
});
}
/**
 * Reads up to {@code length} bytes starting at {@code offset}.
 * Returns empty on missing file or internal error; a short/empty ByteString
 * when the range extends past the stored data.
 */
public Optional<ByteString> read(String fileUuid, long offset, int length) {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
return txm.run(() -> {
var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return Optional.empty();
}
try {
var chunksAll = new TreeMap<>(file.getChunks());
if (chunksAll.isEmpty()) {
return Optional.of(ByteString.empty());
}
// Start from the chunk covering (or starting at) the requested offset.
// NOTE(review): floorKey(offset) can be null when offset precedes the
// first chunk's key - tailMap would then NPE; confirm chunks always start at 0.
var chunksList = chunksAll.tailMap(chunksAll.floorKey(offset)).entrySet();
if (chunksList.isEmpty()) {
return Optional.of(ByteString.empty());
}
var chunks = chunksList.iterator();
ByteString buf = ByteString.empty();
long curPos = offset;
var chunk = chunks.next();
// Walk consecutive chunks, concatenating the covered slices until the
// requested range is satisfied or chunks run out.
while (curPos < offset + length) {
var chunkPos = chunk.getKey();
long offInChunk = curPos - chunkPos;
long toReadInChunk = (offset + length) - curPos;
var chunkBytes = readChunk(chunk.getValue());
long readableLen = chunkBytes.size() - offInChunk;
var toReadReally = Math.min(readableLen, toReadInChunk);
if (toReadReally < 0) break;
buf = buf.concat(chunkBytes.substring((int) offInChunk, (int) (offInChunk + toReadReally)));
curPos += toReadReally;
if (readableLen > toReadInChunk)
break;
if (!chunks.hasNext()) break;
chunk = chunks.next();
}
// FIXME:
return Optional.of(buf);
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
return Optional.empty();
}
});
}
/**
 * Writes {@code data} at {@code offset}, extending the file with zeros first
 * if the offset is past EOF. Overlapped chunks are removed; the new bytes
 * (plus preserved head/tail slices of partially-overlapped chunks, plus
 * optionally absorbed small neighbor chunks) are re-split into target-sized
 * chunks. Returns the number of bytes written, or -1 if the file is missing.
 */
public Long write(String fileUuid, long offset, ByteString data) {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
return txm.run(() -> {
// FIXME:
var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return -1L;
}
// Zero-fill the gap when writing past the current end of file.
if (size(fileUuid) < offset)
truncate(fileUuid, offset);
// Get chunk ids from the database
var chunksAll = file.getChunks();
// first: last chunk starting at or before offset; last: last chunk starting before write end.
var first = chunksAll.floorEntry(offset);
var last = chunksAll.lowerEntry(offset + data.size());
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
long start = 0;
NavigableMap<Long, JObjectKey> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
NavigableMap<Long, JObjectKey> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();
if (first != null && readChunk(first.getValue()).size() + first.getKey() <= offset) {
// The floor chunk ends before the write begins: nothing overlaps,
// the write is appended strictly after all existing chunks.
beforeFirst = chunksAll;
afterLast = Collections.emptyNavigableMap();
first = null;
last = null;
start = offset;
} else if (!chunksAll.isEmpty()) {
// Mark every chunk between first and last (inclusive) for removal.
var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
removedChunks.putAll(between);
start = first.getKey();
}
ByteString pendingWrites = ByteString.empty();
// Preserve the head of the first overlapped chunk that precedes the write.
if (first != null && first.getKey() < offset) {
var chunkBytes = readChunk(first.getValue());
pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey())));
}
pendingWrites = pendingWrites.concat(data);
// Preserve the tail of the last overlapped chunk that extends past the write.
if (last != null) {
var lchunkBytes = readChunk(last.getValue());
if (last.getKey() + lchunkBytes.size() > offset + data.size()) {
var startInFile = offset + data.size();
var startInChunk = startInFile - last.getKey();
pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
}
}
int combinedSize = pendingWrites.size();
if (targetChunkSize > 0) {
// Small write: absorb small neighboring chunks to avoid fragmentation.
if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
boolean leftDone = false;
boolean rightDone = false;
// NOTE(review): this loop exits as soon as EITHER side is done
// (condition is !leftDone && !rightDone); the other side may still
// have absorbable chunks - confirm whether || was intended.
while (!leftDone && !rightDone) {
if (beforeFirst.isEmpty()) leftDone = true;
// NOTE(review): asymmetric with the afterLast branch below, which
// uses &&; this || allows entering after leftDone was set via
// `continue` on a previous iteration - looks like a bug, confirm.
if (!beforeFirst.isEmpty() || !leftDone) {
var takeLeft = beforeFirst.lastEntry();
var cuuid = takeLeft.getValue();
if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) {
leftDone = true;
continue;
}
if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) {
leftDone = true;
continue;
}
// FIXME: (and test this)
beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
start = takeLeft.getKey();
pendingWrites = readChunk(cuuid).concat(pendingWrites);
combinedSize += readChunk(cuuid).size();
removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
}
if (afterLast.isEmpty()) rightDone = true;
if (!afterLast.isEmpty() && !rightDone) {
var takeRight = afterLast.firstEntry();
var cuuid = takeRight.getValue();
if (readChunk(cuuid).size() >= (targetChunkSize * writeMergeMaxChunkToTake)) {
rightDone = true;
continue;
}
if ((combinedSize + readChunk(cuuid).size()) > (targetChunkSize * writeMergeLimit)) {
rightDone = true;
continue;
}
// FIXME: (and test this)
afterLast = afterLast.tailMap(takeRight.getKey(), false);
pendingWrites = pendingWrites.concat(readChunk(cuuid));
combinedSize += readChunk(cuuid).size();
removedChunks.put(takeRight.getKey(), takeRight.getValue());
}
}
}
}
// Re-split the combined buffer into chunks of ~targetChunkSize, letting the
// final chunk absorb a small remainder (writeLastChunkLimit).
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
{
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
end = Math.min(cur + targetChunkSize, combinedSize);
} else {
end = combinedSize;
}
}
var thisChunk = pendingWrites.substring(cur, end);
newChunks.put(start, createChunk(thisChunk));
start += thisChunk.size();
cur = end;
}
}
var newChunksMap = new TreeMap<>(chunksAll);
for (var e : removedChunks.entrySet()) {
newChunksMap.remove(e.getKey());
// em.remove(em.getReference(ChunkData.class, e.getValue()));
}
newChunksMap.putAll(newChunks);
file.setChunks(newChunksMap);
updateFileSize(file);
return (long) data.size();
});
}
/**
 * Sets the file length to {@code length}: grows by appending zero-filled
 * chunks, or shrinks by dropping/trimming tail chunks. Returns false only
 * when the file does not exist.
 */
public Boolean truncate(String fileUuid, long length) {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
return txm.run(() -> {
var file = curTx.getObject(File.class, new JObjectKey(fileUuid)).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return false;
}
if (length == 0) {
// Drop all chunk references. NOTE(review): orphaned ChunkData objects
// are not deleted here (see commented em.remove below) - confirm GC elsewhere.
file.setChunks(new TreeMap<>());
updateFileSize(file);
return true;
}
var curSize = size(fileUuid);
if (curSize == length) return true;
var chunksAll = file.getChunks();
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
// Grow: append zero-filled chunks covering [curSize, length).
long combinedSize = (length - curSize);
long start = curSize;
// Hack
HashMap<Long, ByteString> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));
newChunks.put(start, createChunk(zeroCache.get(end - cur)));
start += (end - cur);
cur = end;
}
}
} else {
// Shrink: remove every chunk from the one containing `length` onward,
// re-adding a trimmed copy of that boundary chunk.
// NOTE(review): assumes a chunk with key < length exists; holds here
// since curSize > length > 0 implies chunks start at or below length - 1.
var tail = chunksAll.lowerEntry(length);
var afterTail = chunksAll.tailMap(tail.getKey(), false);
removedChunks.put(tail.getKey(), tail.getValue());
removedChunks.putAll(afterTail);
var tailBytes = readChunk(tail.getValue());
var newChunk = tailBytes.substring(0, (int) (length - tail.getKey()));
newChunks.put(tail.getKey(), createChunk(newChunk));
}
var newChunkMap = new TreeMap<>(chunksAll);
for (var e : removedChunks.entrySet()) {
newChunkMap.remove(e.getKey());
// em.remove(em.getReference(ChunkData.class, e.getValue()));
}
newChunkMap.putAll(newChunks);
file.setChunks(newChunkMap);
updateFileSize(file);
return true;
});
}
/** Recomputes the cached size as (last chunk offset + last chunk length) and stores it if changed. Must run inside a transaction. */
public void updateFileSize(File file) {
long realSize = 0;
var last = file.getChunks().lastEntry();
if (last != null) {
var lastSize = readChunk(last.getValue()).size();
realSize = last.getKey() + lastSize;
}
if (realSize != file.getSize()) {
file.setSize(realSize);
}
}
/** Returns the cached file size, or -1 if the file does not exist. */
public Long size(String uuid) {
return txm.run(() -> {
var file = curTx.getObject(File.class, new JObjectKey(uuid)).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + uuid);
return -1L;
}
return file.getSize();
});
}
}

View File

@@ -1,260 +0,0 @@
package org.acme.fuse;
import com.google.protobuf.UnsafeByteOperations;
import com.sun.security.auth.module.UnixSystem;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import org.acme.files.service.DhfsFileService;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import java.nio.file.Paths;
import java.util.ArrayList;
import static jnr.posix.FileStat.S_IFREG;
@ApplicationScoped
public class DhfsFuse extends FuseStubFS {
private static final int blksize = 1048576;
private static final int iosize = 1048576;
@ConfigProperty(name = "dhfs.fuse.root")
String root;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@Inject
DhfsFileService fileService;
ClassLoader classLoader;
/**
 * Startup hook (runs late, priority 100000): captures the context class loader
 * for use in FUSE callback threads, creates the mount directory, and mounts
 * the filesystem non-blocking with the current user's uid/gid.
 */
void init(@Observes @Priority(100000) StartupEvent event) {
// FUSE callbacks arrive on non-Quarkus threads; keep the CL to restore it there.
classLoader = Thread.currentThread().getContextClassLoader();
Paths.get(root).toFile().mkdirs();
Log.info("Mounting with root " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
var opts = new ArrayList<String>();
// Assuming macFuse
// if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("iosize=" + iosize);
// } else if (SystemUtils.IS_OS_LINUX) {
// // FIXME: There's something else missing: the writes still seem to be 32k max
//// opts.add("-o");
//// opts.add("large_read");
// opts.add("-o");
// opts.add("big_writes");
// opts.add("-o");
// opts.add("max_read=" + iosize);
// opts.add("-o");
// opts.add("max_write=" + iosize);
// }
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("-o");
opts.add("gid=" + gid);
// blocking=false, debug=false; options passed straight to FUSE.
mount(Paths.get(root), false, false, opts.toArray(String[]::new));
}
void shutdown(@Observes @Priority(1) ShutdownEvent event) {
Log.info("Unmounting");
umount();
Log.info("Unmounted");
}
@Override
public int statfs(String path, Statvfs stbuf) {
Log.info("statfs " + path);
try {
stbuf.f_frsize.set(blksize);
stbuf.f_bsize.set(blksize);
stbuf.f_blocks.set(1024 * 1024); // total data blocks in file system
stbuf.f_bfree.set(1024 * 1024); // free blocks in fs
stbuf.f_bavail.set(1024 * 1024); // avail blocks in fs
stbuf.f_files.set(1000); //FIXME:
stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME:
stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME:
stbuf.f_namemax.set(2048);
return super.statfs(path, stbuf);
} catch (Exception e) {
Log.error("When statfs " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int getattr(String path, FileStat stat) {
Thread.currentThread().setContextClassLoader(classLoader);
Log.info("getattr " + path);
if (path.equals("/")) {
stat.st_mode.set(FileStat.S_IFDIR | 0777);
stat.st_nlink.set(2);
return 0;
}
try {
var fileOpt = fileService.open(path.substring(1));
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var uuid = fileOpt.get();
stat.st_mode.set(S_IFREG | 0755);
stat.st_nlink.set(1);
stat.st_size.set(fileService.size(uuid));
// FIXME: Race?
// stat.st_ctim.tv_sec.set(found.get().ctime() / 1000);
// stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000);
// stat.st_mtim.tv_sec.set(found.get().mtime() / 1000);
// stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
// stat.st_atim.tv_sec.set(found.get().mtime() / 1000);
// stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
stat.st_blksize.set(blksize);
} catch (Exception e) {
Log.error("When getattr " + path, e);
return -ErrorCodes.EIO();
} catch (Throwable e) {
Log.error("When getattr " + path, e);
return -ErrorCodes.EIO();
}
return 0;
}
@Override
public int open(String path, FuseFileInfo fi) {
Thread.currentThread().setContextClassLoader(classLoader);
Log.info("open " + path);
if (path.equals("/")) return 0;
try {
if (fileService.open(path.substring(1)).isEmpty()) return -ErrorCodes.ENOENT();
return 0;
} catch (Exception e) {
Log.error("When open " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
Thread.currentThread().setContextClassLoader(classLoader);
Log.info("read " + path + " " + size + " " + offset);
if (size < 0) return -ErrorCodes.EINVAL();
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileOpt = fileService.open(path.substring(1));
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var file = fileOpt.get();
var read = fileService.read(fileOpt.get(), offset, (int) size);
if (read.isEmpty()) return 0;
buf.put(0, read.get().toByteArray(), 0, read.get().size());
return read.get().size();
} catch (Exception e) {
Log.error("When reading " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
Thread.currentThread().setContextClassLoader(classLoader);
Log.info("write " + path + " " + size + " " + offset);
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileOpt = fileService.open(path.substring(1));
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var buffer = new byte[(int) size];
buf.get(0, buffer, 0, (int) size);
var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
return written.intValue();
} catch (Exception e) {
Log.error("When writing " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int truncate(String path, long size) {
if (size < 0) return -ErrorCodes.EINVAL();
try {
var ok = fileService.truncate(path.substring(1), size);
if (ok)
return 0;
else
return -ErrorCodes.ENOSPC();
} catch (Exception e) {
Log.error("When truncating " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int create(String path, long mode, FuseFileInfo fi) {
Thread.currentThread().setContextClassLoader(classLoader);
try {
var ret = fileService.create(path.substring(1));
if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
else return 0;
} catch (Exception e) {
Log.error("When creating " + path, e);
return -ErrorCodes.EIO();
}
}
@Override
public int readdir(String path, Pointer buf, FuseFillDir filler, long offset, FuseFileInfo fi) {
Thread.currentThread().setContextClassLoader(classLoader);
Log.info("readdir " + path);
try {
Iterable<String> found;
try {
found = fileService.readdir(path.substring(1));
} catch (StatusRuntimeException e) {
if (e.getStatus().getCode().equals(Status.NOT_FOUND.getCode()))
return -ErrorCodes.ENOENT();
else throw e;
}
filler.apply(buf, ".", null, 0);
filler.apply(buf, "..", null, 0);
for (var c : found) {
filler.apply(buf, c, null, 0);
}
return 0;
} catch (Exception e) {
Log.error("When readdir " + path, e);
return -ErrorCodes.EIO();
}
}
}

View File

@@ -1,21 +0,0 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.files.target_chunk_size=262144
# Writes strictly smaller than this will try to merge with blocks nearby
dhfs.files.write_merge_threshold=0.8
# If a merge would result in a block of greater size than this, stop merging
dhfs.files.write_merge_limit=1.2
# Don't take blocks of this size and above when merging
dhfs.files.write_merge_max_chunk_to_take=1
dhfs.files.write_last_chunk_limit=1.5
quarkus.log.category."com.usatiuk.dhfs".min-level=INFO
quarkus.log.category."com.usatiuk.dhfs".level=INFO
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
dhfs.objects.persistence.files.root=${HOME}/dhfs_default/dhfsdb
quarkus.hibernate-orm.database.generation=drop-and-create
quarkus.datasource.jdbc.url=jdbc:h2:file:${HOME}/dhfs_default/dhfsdb
quarkus.datasource.db-kind=h2
quarkus.hibernate-orm.cache."org.acme.files.objects.ChunkData".memory.object-count=500

View File

@@ -1,6 +0,0 @@
-- This file allows writing SQL commands that will be emitted in test and dev.
-- The commands are commented out because their support depends on the database
-- insert into myentity (id, field) values(1, 'field-1');
-- insert into myentity (id, field) values(2, 'field-2');
-- insert into myentity (id, field) values(3, 'field-3');
-- alter sequence myentity_seq restart with 4;

View File

@@ -19,7 +19,6 @@
<module>utils</module>
<module>objects-alloc</module>
<module>objects-common</module>
<module>crapfs</module>
</modules>
<properties>