192 Commits

Author SHA1 Message Date
2434b0464f fit.cvut.cz link fix 2025-06-12 09:52:24 +02:00
ab4e06177e add a notice 2025-06-12 09:49:30 +02:00
86f240f439 cleanup dependencies a little 2025-05-14 21:45:20 +02:00
59447aa286 fix windows map size 2025-05-14 20:54:59 +02:00
98b7fe81ae another note 2025-05-14 20:43:32 +02:00
0713d20d95 windows note 2 2025-05-14 20:39:14 +02:00
13390ba301 windows note 2025-05-14 20:24:42 +02:00
b040c64da7 grammar fix 2025-05-14 20:21:19 +02:00
fa9b77dc34 update java version 2025-05-14 20:20:53 +02:00
508df91c0a readme fixie 2025-05-14 19:54:31 +02:00
20eb436c4b remove readme trailing space space 2025-05-14 19:53:19 +02:00
59d5b22266 improve readme 2 2025-05-14 19:52:19 +02:00
4167f661e8 copy readme to run wrapper 2025-05-14 19:49:28 +02:00
2cc5a703ef Improve readme 2025-05-14 19:48:38 +02:00
a5490047b8 Slight fixes 2025-05-14 19:21:00 +02:00
2cd210dfd1 Slight test cleanup 2025-05-14 19:16:22 +02:00
6e37c26845 Slight config cleanup 2025-05-14 19:00:51 +02:00
4f7c7927f3 JAVA_HOME in run wrapper 2025-05-14 18:33:27 +02:00
723a94ce0e update readme 2025-05-14 18:11:53 +02:00
57b57397b6 Dhfs-fuse: remove TieredStopAtLevel from test 2025-05-14 18:05:25 +02:00
2a6656cd1a Sync-base: increase default getSelfInfo timeout 2025-05-14 18:04:50 +02:00
de5338a813 more cleanup 2025-05-14 16:52:45 +02:00
8b4430fa73 Some config cleanup 2025-05-14 14:16:06 +02:00
1025e6b246 Sync-base: cleanup proto files 2025-05-14 12:14:38 +02:00
7274131052 update readme 2025-05-14 12:05:13 +02:00
930eb38b9b Objects: cleanup ReadTrackingSourceWrapper 2025-05-14 11:41:11 +02:00
afb6f0c270 Dhfs-fuse: fix ctime 2025-05-14 11:39:40 +02:00
e7f5be689f Dhfs-fuse: report real filesystem space 2025-05-14 11:32:00 +02:00
84b1d57125 Sync-base: skip scheduled execution when app is not running 2025-05-14 11:27:39 +02:00
2206c13466 More lmdb logs 2025-05-14 11:18:05 +02:00
d1df6b705f Show logs for test containers that failed starting
it just... doesn't start sometimes???
2025-05-14 10:38:55 +02:00
83ceefa041 Sync-base: more javadocs 2 2025-05-13 23:46:07 +02:00
838405fb46 Sync-base: more javadocs 2025-05-13 22:20:13 +02:00
dbad8a2b22 Objects: javadocs 2025-05-13 20:53:44 +02:00
66dabdef25 a couple more javadocs 2025-05-13 20:13:28 +02:00
87e127bdfb KleppmannTree javadocs
and some more
2025-05-13 15:55:33 +02:00
fd62543687 CI: reenable rest of CI 2025-05-12 16:15:51 +02:00
757a0bbc8a javadocs github pages (#7)
* javadocs github pages

* fix

* fix

* proper pages
2025-05-12 16:13:42 +02:00
0c3524851e Some javadocs + CI 2025-05-12 12:49:10 +02:00
3eb7164c0f Dhfs-fuse: fix LazyFsIT tests 2025-05-10 16:49:58 +02:00
f544a67fb5 Objects: cleanup AutoCloseableNoThrow 2025-05-10 13:49:42 +02:00
964b3da951 Objects: remove getUsableSpace 2025-05-10 11:20:34 +02:00
cb33472dc5 Utils: remove VoidFn 2025-05-10 11:07:40 +02:00
de211bb2d2 Objects: remove prepareTx 2025-05-07 16:12:47 +02:00
56ab3bad4c Objects: remove TransactionPrivate 2025-05-07 15:00:15 +02:00
9403556220 Objects: remove TransactionFactory 2025-05-07 14:39:55 +02:00
469a6b9011 Objects: remove lockmanager 2025-05-07 11:21:48 +02:00
52ccbb99bc Sync-base: rename ConnectedPeerManager to ReachablePeerManager
tests check for "connected" in logs
2025-05-06 20:28:21 +02:00
d972cd1562 Objects: remove LockingStrategy 2025-05-06 20:21:29 +02:00
80151bcca5 Dhfs-fuse: less parallel e2e tests 2025-05-06 20:07:03 +02:00
289a2b880e Sync-base: rename ConnectedPeerManager 2025-05-05 22:18:00 +02:00
0849df60ae Dhfs-fs: remove DhfsFileService 2025-05-05 21:58:37 +02:00
9cb5c226f9 remove dhfs-app 2025-05-05 21:20:07 +02:00
87c404828c add powershell run scripts 2025-05-04 17:44:57 +02:00
b074e8eb44 Dhfs-fs: proper not found unlink exception 2025-05-04 17:13:43 +02:00
eb5b0ae03c cleanup run wrapper 2025-05-03 17:18:28 +02:00
c329c1f982 Objects: nested transactions 2025-05-03 13:57:44 +02:00
4e7b13227b Sync-base: "kick out" inactive peers 2025-05-03 13:14:06 +02:00
db51d7280c Revert "Sync-base: get rid of JDataRemotePush"
This reverts commit 07133a71
2025-05-03 11:25:23 +02:00
70fecb389b Objects: cleanup transaction put a little 2025-05-02 12:50:43 +02:00
6e9a2b25f6 Utils: cleanup UnsafeAccessor a little 2025-05-02 12:39:50 +02:00
b84ef95703 Revert "Objects: simplify tx commit hooks"
This reverts commit c0735801b9.
2025-05-01 13:42:06 +02:00
c0735801b9 Objects: simplify tx commit hooks 2025-05-01 13:27:39 +02:00
b506ced9d5 Objects: simplify WritebackObjectPersistentStore 2025-05-01 10:29:11 +02:00
46bc9fa810 Objects: remove transactionobject 2025-05-01 09:14:50 +02:00
8ab034402d Revert "Objects: simplify cache"
This reverts commit d94d11ec8b.
2025-04-29 20:36:19 +02:00
d94d11ec8b Objects: simplify cache 2025-04-29 20:22:21 +02:00
5beaad2d32 Objects: better iterators 2025-04-29 16:53:26 +02:00
c4484d21e5 Objects: simplify tx commit a little 2025-04-29 16:50:41 +02:00
2766ef1bae Add --enable-preview to run.xml and run 2025-04-29 12:49:03 +02:00
58de85c078 Sync-base: WritebackObjectPersistentStore cleanup 2025-04-29 12:46:39 +02:00
cc9da86440 Sync-base: JObjectKeyImpl import fix 2025-04-29 12:44:41 +02:00
e6c9e6aee9 Dhfs-fuse: implement write_buf for one less copy 2025-04-29 12:44:18 +02:00
62265355c4 Dhfs-fs: a little cleanup in DhfsFileServiceImpl 2025-04-29 12:44:00 +02:00
854bce1627 Utils: UninitializedByteBuffer 2025-04-29 12:43:18 +02:00
1b19c77bb6 Utils: slightly faster add in HashSetDelayedBlockingQueue 2025-04-29 12:40:45 +02:00
7aa968a569 Dhfs-fuse: fix import 2025-04-29 00:45:34 +02:00
e348c39be1 Utils: add UnsafeAccessor to JnrPtrByteOutput
oops
2025-04-28 23:50:31 +02:00
1b54830651 Objects: don't lock some objects twice for no reason 2025-04-28 23:49:45 +02:00
bc5f0b816c Objects: add putNew
to avoid searching for nonexistent objects
2025-04-28 23:47:53 +02:00
9ff914bdaa Utils: move UnsafeAccessor to utils 2025-04-28 23:36:42 +02:00
1cee6f62b8 Utils: less dumb DataLocker 2025-04-28 23:34:30 +02:00
81703a9406 Sync-base: some microoptimizations 2025-04-28 15:44:36 +02:00
1757034e0b Sync-base: speed up RemoteObjPusherTxHook
they are immutable, no need to do real equals, they can't be same if different
2025-04-28 15:09:23 +02:00
d9765a51d8 Sync-base: freeze JKleppmannTreeNodeHolder root nodes 2025-04-28 13:00:50 +02:00
99ef560b95 Sync-base: static final hooks
so that compiler can fold them
2025-04-28 12:59:21 +02:00
f87eb365c3 Sync-base: remove our referrers from canDelete 2025-04-26 16:07:23 +02:00
8d3244fe64 Webui: use node-forge for hashing
apparently crypto works only on ssl websites
2025-04-26 14:09:05 +02:00
0a8985c93f Short readme 2025-04-26 11:19:06 +02:00
a8cf483eee Simplify ObjectPersistentStore 2025-04-26 11:11:51 +02:00
f7338f4e80 Dhfs-app: check that kill tests ls/cat return success 2025-04-26 10:44:33 +02:00
b89b182c58 Dhfs-fuse: make lmdb map size configurable 2025-04-25 23:11:02 +02:00
ad4ce72fdd Dhfs-fuse: attempt at windows support 2025-04-25 22:17:55 +02:00
26ba65fdce Sync-base: make Pushing invalidations log message trace
it's too big
2025-04-25 22:02:21 +02:00
697add66d5 Kelppmanntree: fix a dumb bug
directories are always the same duh
2025-04-25 22:00:33 +02:00
a53fc5e973 Kelppmanntree: remove undocontext 2025-04-25 21:37:50 +02:00
b034591091 Sync-base: OpHandler interface 2025-04-25 15:04:07 +02:00
07133a7186 Sync-base: get rid of JDataRemotePush 2025-04-25 14:57:06 +02:00
8cbecf1714 Dhfs-fs: remove optional from read 2025-04-25 13:40:48 +02:00
16ba692019 Recordify tree metadata 2025-04-25 13:35:54 +02:00
e5be1e6164 Cleanup poms 2025-04-25 13:13:21 +02:00
c74fdfc5a6 Dhfs-app: test fixes 2 2025-04-25 12:59:25 +02:00
c4268ab35b Dhfs-app: test fixes 2025-04-25 11:23:12 +02:00
2ab6e3c3f7 Sync-base: Handle getting peer info failure nicely 2025-04-25 11:15:36 +02:00
ec8546bd69 Show Peer address in WebUI 2025-04-25 11:13:30 +02:00
6ecef94b90 Webui: a little nicer 2025-04-25 11:07:08 +02:00
e7f22d783f Webui: proper async hash 2025-04-25 11:03:39 +02:00
bed55162d7 Peer certificate check when adding 2025-04-25 10:48:55 +02:00
f43c6db4f0 Run code format 2025-04-25 09:58:46 +02:00
56a15f4672 Sync-base: cleanup JKleppmannTree meta 2025-04-25 09:57:44 +02:00
85a1fa09ab KleppmannTree: a little cleanup 2025-04-25 09:45:35 +02:00
cca0b410cf Some packages cleanup 2025-04-25 09:16:31 +02:00
d94abfee97 Sync-base: op extractor interface 2025-04-25 09:16:31 +02:00
dependabot[bot] 6bd92ad7cd Bump the npm_and_yarn group across 1 directory with 2 updates
Bumps the npm_and_yarn group with 2 updates in the /webui directory: [react-router](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router) and [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom).


Updates `react-router` from 7.4.1 to 7.5.2
- [Release notes](https://github.com/remix-run/react-router/releases)
- [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router/CHANGELOG.md)
- [Commits](https://github.com/remix-run/react-router/commits/react-router@7.5.2/packages/react-router)

Updates `react-router-dom` from 7.4.1 to 7.5.2
- [Release notes](https://github.com/remix-run/react-router/releases)
- [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md)
- [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@7.5.2/packages/react-router-dom)

---
updated-dependencies:
- dependency-name: react-router
  dependency-version: 7.5.2
  dependency-type: direct:production
  dependency-group: npm_and_yarn
- dependency-name: react-router-dom
  dependency-version: 7.5.2
  dependency-type: direct:production
  dependency-group: npm_and_yarn
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-25 09:14:39 +02:00
1965d93f25 Dhfs-app: handle empty op push correctly 2025-04-24 22:38:01 +02:00
f6685f45f9 Dhfs-app: increase sync timeout 2025-04-24 22:34:04 +02:00
060ab1767d Dhfs-app: don't crash too late (never) 2025-04-24 19:46:41 +02:00
89d87095c8 Dhfs-app: class level parallel docker tests
otherwise the logs are unreadable
2025-04-24 17:42:24 +02:00
7425c1f312 Dhfs-app: add failed to connect logs 2025-04-24 17:40:38 +02:00
428eca325f Dhfs-app: increase lazyfs start timeout 2025-04-24 17:19:41 +02:00
005bc35496 Dhfs-app: assume lazyfs doesn't crash too early 2025-04-24 16:32:25 +02:00
6685575ca5 Dhfs-app: please work now 2025-04-24 15:48:57 +02:00
1ae813aacd Sync-base: cleanup old grpc channels 2025-04-24 15:46:23 +02:00
e81671251a Dhfs-fs: nevermind, no sleep 2025-04-24 15:30:18 +02:00
add26bb156 Dhfs-fs: larger sleep 2025-04-24 15:23:31 +02:00
4060045f15 Dhfs-fs: no need for removing/adding chunk logs now 2025-04-24 15:14:18 +02:00
75b484d5b2 Dhfs-app: fix asyncFence if there were no transactions 2025-04-24 15:14:03 +02:00
1d9dc8ed4d Dhfs-app: lazyfs test fixes 2025-04-24 15:13:45 +02:00
7a85704862 Dhfs-app: stopatlevel1 for docker tests 2025-04-24 14:14:52 +02:00
367eedd540 Dhfs-app: less verbose docker tests 2025-04-24 12:53:26 +02:00
d01b9204f7 Dhfs-app: better checkConsistency 2025-04-24 12:36:11 +02:00
67fdacc3ff Dhfs-app: remove docker networks in test 2025-04-24 12:28:56 +02:00
6ed9051be1 Dhfs-app: better kill tests 2025-04-24 12:13:56 +02:00
abf95ba847 Dhfs-app: kill test second container 2025-04-24 11:23:32 +02:00
6a9f64439f Executor fix 2025-04-24 10:37:59 +02:00
ceb9342b45 Dhfs-app: try to crash lazyfs a bit later 2025-04-24 09:22:22 +02:00
ca354ba09c Webui: don't show complete address 2025-04-23 17:24:48 +02:00
81af021292 Wait for lazyfs to crash before unmount 2025-04-23 17:19:44 +02:00
0c04079258 Improved peer UI 2025-04-23 16:37:45 +02:00
2e2eb3ac97 Dhfs-app: lazyfs torn op testing 2025-04-23 15:07:09 +02:00
e2e756e7c5 Objects: getFromSource just ever so slightly faster
one map access
2025-04-23 14:50:34 +02:00
04e932ed62 Dhfs-app: LazyFs directory test non-repeated
some docker ip pool problems
2025-04-23 14:17:42 +02:00
aeec66389d Dhfs-app: LazyFs directory test 2025-04-23 14:00:44 +02:00
adc7356d4a Sync-base: fix leaking non-flushed ops 2025-04-23 14:00:30 +02:00
16da05292f Objects: better onflush for no write transactions 2025-04-23 13:58:55 +02:00
b0149b7251 Objects: less logs in iterators
less crap, and there are tests now
2025-04-23 11:06:26 +02:00
24416c1e87 Dhfs-app: less badlazyfs crash test 2025-04-23 10:38:52 +02:00
34db870fc6 Objects: simplify TombstoneMergingKvIterator 2025-04-22 23:53:41 +02:00
0e62a29ce0 Objects: cache peeked key in LmdbKvIterator 2025-04-22 23:53:18 +02:00
7de5f91fd2 Dhfs-app: lazyfs crash test 2025-04-22 23:25:26 +02:00
ac68208b1a Sync-base: don't crash if invalidation queue is corrupted 2025-04-22 21:38:15 +02:00
4e0675940e Dhfs-app: better fileConflictTest 2025-04-22 21:37:51 +02:00
4f5f347b3c Use stable jnr-fuse version 2025-04-21 11:30:14 +02:00
bd5395e03f Dhfs-fs: mtime fix 2025-04-21 11:29:35 +02:00
f56f564e8b Objects: simplify TransactionManager 2025-04-21 11:15:48 +02:00
eaa413e200 Objects: interfacify MaybeTombstone Data 2025-04-19 17:25:06 +02:00
f3e4d99fcb Objects: seal JDataVersionedWrapper 2025-04-19 12:07:36 +02:00
1c71b26ed8 Objects: 1 less field in JDataVersionedWrapperLazy 2025-04-19 12:06:33 +02:00
e6f95ef028 Remove supportlib
nice idea, but ram usage explosion seems to cancel out the benefits
2025-04-19 11:32:35 +02:00
59e8f6a6b4 Objects: one less copy when serializing
only cache what was really read, otherwise its lifetime is the same as transaction
2025-04-19 11:03:26 +02:00
0292df7f0e Objects: faster JObjectKey 2025-04-19 11:02:30 +02:00
a6a4101bb0 Objects: use bytebuffer to read
a little less GC pressure
2025-04-18 13:21:04 +02:00
59fa5dcf28 Fixie for HashSetDelayedBlockingQueueTest 2025-04-18 13:08:55 +02:00
0f5fb8b8b6 Objects: PBT MergingIterator test 2025-04-18 13:08:40 +02:00
c087dd8971 More microoptimizations 3! 2025-04-18 12:13:22 +02:00
14ddddd0ff Sync-base: use serialized certificate in self data
makes it easier to switch serialization
2025-04-18 11:06:40 +02:00
9859378627 Sync-base: move "_data" to suffix
makes cache much less bad
2025-04-18 11:06:15 +02:00
e167c21d40 More microoptimizations 2! 2025-04-17 11:48:43 +02:00
7dc8f486ea More microoptimizations! 2025-04-17 10:02:26 +02:00
da1a996e6f Support: un-simplify allocateUninitialized 2025-04-17 09:20:56 +02:00
bb52a3af0e Objects: waste less cpu in transaction commit 2025-04-17 00:26:58 +02:00
de0b868349 Objects: one less sorted tree traversal in advanceIterator
totally overengineering
2025-04-17 00:14:56 +02:00
d4d4e150c1 Objects: use LATIN1 strings for keys
should be a bit faster to match the internal string representation
2025-04-17 00:12:37 +02:00
c9b0400d50 Objects: faster MergingKvIterator 2025-04-16 23:41:30 +02:00
94218330b1 Simplify allocateUninitialized 2025-04-16 16:26:58 +02:00
dbe2a72f7c Objects: don't create a db_ver_obj bytebuffer every time 2025-04-16 15:39:52 +02:00
643c53c894 Objects: less string concats 2025-04-15 17:07:03 +02:00
29fdd3eb08 Objects: don't calculate bundle size all the time from scratch 2025-04-15 17:02:26 +02:00
e6ead10e7f Objects: use direct bytebuffers when reading 2025-04-15 16:35:31 +02:00
04c5685fd5 Use @Singleton instead of @ApplicationScoped in hot paths
Definitely a microoptimization but noticeable on the flamegraph
2025-04-15 16:29:49 +02:00
7061117f56 Fs: file writing with less cpu wasted 2025-04-15 16:14:40 +02:00
67852fb37e Objects: less awful put, with 2 less copies 2025-04-15 11:17:23 +02:00
d48cc18e85 Server: only try to purge directory
won't bother getting it working with rootful docker
2025-04-13 22:03:31 +02:00
77177414eb Server: slight directory fixes 2025-04-13 20:37:12 +02:00
83e0f6eb0a Server: fix not being able to delete temp dir in tests 2025-04-13 20:05:51 +02:00
a5727c01b1 Server: push resync after crash 2025-04-13 19:57:16 +02:00
711c4f5e28 Server: simple kill test 2025-04-13 19:36:34 +02:00
45556f2b74 Objects: rename name to value in JObjectKey 2025-04-13 17:10:33 +02:00
146870c281 Objects: support parallel test running
crashes with env maxreaders reached though
2025-04-13 14:54:25 +02:00
9178e7ee2d Separate dhfs-fs/fuse/sync-base 2025-04-13 14:30:00 +02:00
7c605135c5 Some native image support
but it still doesn't work because of JNR
2025-04-13 13:09:36 +02:00
491afd454b Objects: serialize written objects in parallel 2025-04-13 12:08:40 +02:00
405 changed files with 9647 additions and 7587 deletions


@@ -1,4 +0,0 @@
**/.parcel-cache
**/dist
**/node_modules
**/target


@@ -7,12 +7,6 @@ on:
pull_request:
branches: ["main"]
env:
# Use docker.io for Docker Hub if empty
REGISTRY: ghcr.io
# github.repository as <account>/<repo>
IMAGE_NAME: ${{ github.repository }}
jobs:
build-dhfs:
runs-on: ubuntu-latest
@@ -20,26 +14,21 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -48,16 +37,21 @@ jobs:
distribution: "zulu"
cache: maven
- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh
# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
- name: Test with Maven
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify javadoc:aggregate
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/server/target/quarkus-app
path: dhfs-parent/dhfs-fuse/target/quarkus-app
- uses: actions/upload-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-parent/target/reports/apidocs/
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -89,211 +83,12 @@ jobs:
name: Webui
path: webui/dist
build-native-libs:
strategy:
matrix:
include:
- os: ubuntu-latest
cross: "linux/amd64"
- os: ubuntu-latest
cross: "linux/arm64"
- os: macos-latest
runs-on: ${{ matrix.os }}
env:
DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set SANITIZED_DOCKER_PLATFORM
run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV
- name: Set DOCKER_BUILDER_IMAGE
run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV
- name: Build config
run: |
echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
echo DOCKER_PLATFORM: $DOCKER_PLATFORM
echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE
- name: Set up JDK 21
if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
uses: actions/setup-java@v4
with:
java-version: "21"
distribution: "zulu"
cache: maven
- name: Set up Docker Buildx
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-qemu-action@v3
- name: Build Docker builder image
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/build-push-action@v5
with:
context: ./libdhfs_support/builder
file: ./libdhfs_support/builder/Dockerfile
push: false
platforms: ${{ env.DOCKER_PLATFORM }}
tags: ${{ env.DOCKER_BUILDER_IMAGE }}
cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
load: true
- name: Build the library
run: |
CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"
- name: Upload build
uses: actions/upload-artifact@v4
with:
name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
path: result
merge-native-libs:
runs-on: ubuntu-latest
needs: [build-native-libs]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: downloaded-libs
- name: Merge all
run: rsync -av downloaded-libs/NativeLib*/* result/
- name: Check that libs exists
run: |
test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1
- name: Upload
uses: actions/upload-artifact@v4
with:
name: NativeLibs
path: result
publish-docker:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, merge-native-libs, build-dhfs]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download server package
uses: actions/download-artifact@v4
with:
name: DHFS Server Package
path: dhfs-package-downloaded
- name: Download webui
uses: actions/download-artifact@v4
with:
name: Webui
path: webui-dist-downloaded
- name: Download native libs
uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
# Install the cosign tool except on PR
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@v3.5.0
with:
cosign-release: "v2.2.4"
# Set up BuildKit Docker container builder to be able to build
# multi-platform images and export cache
# https://github.com/docker/setup-buildx-action
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# Build and push Docker image with Buildx (don't push on PR)
# https://github.com/docker/build-push-action
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.ci
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64,linux/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
# Sign the resulting Docker image digest except on PRs.
# This will only write to the public Rekor transparency log when the Docker
# repository is public to avoid leaking data. If you would like to publish
# transparency data even for private images, pass --force to cosign below.
# https://github.com/sigstore/cosign
- name: Sign the published Docker image
if: ${{ github.event_name != 'pull_request' }}
env:
# https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
TAGS: ${{ steps.meta.outputs.tags }}
DIGEST: ${{ steps.build-and-push.outputs.digest }}
# This step uses the identity token to provision an ephemeral certificate
# against the sigstore community Fulcio instance.
run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}
publish-run-wrapper:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, merge-native-libs, build-dhfs]
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
@@ -309,11 +104,6 @@ jobs:
name: Webui
path: webui-dist-downloaded
- uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -321,17 +111,18 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
- name: Copy Webui
run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"
- name: Copy run wrapper
run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"
- name: Copy README
run: |
cp README.md "run-wrapper-out/dhfs/"
- name: Add version to run wrapper
run: echo $GITHUB_RUN_ID > "run-wrapper-out/dhfs/app/"version
@@ -343,3 +134,36 @@ jobs:
with:
name: Run wrapper
path: ~/run-wrapper.tar.gz
publish-javadoc:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
permissions:
contents: read
pages: write
id-token: write
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: DHFS Javadocs
path: dhfs-javadocs-downloaded
- name: Setup Pages
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: "dhfs-javadocs-downloaded"
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4

.gitmodules (new file, +3 lines)

@@ -0,0 +1,3 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git

.vscode/ltex.dictionary.en-US.txt (new file, +1 line)

@@ -0,0 +1 @@
Syncthing


@@ -1,35 +0,0 @@
FROM node:20-bullseye as webui-build
WORKDIR /usr/src/app/webui-build
COPY ./webui/package*.json ./
RUN npm i
COPY ./webui/. .
RUN npm run build
FROM azul/zulu-openjdk:21 as server-build
WORKDIR /usr/src/app/server-build
COPY ./server/.mvn .mvn
COPY ./server/mvnw ./server/pom.xml ./
RUN ./mvnw quarkus:go-offline
# The previous thing still doesn't download 100% everything
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true package --fail-never
COPY ./server/. .
RUN ./mvnw -Dmaven.test.skip=true -Dskip.unit=true clean package
FROM azul/zulu-openjdk-alpine:21-jre-headless
RUN apk update && apk add fuse && rm -rf /var/cache/apk/*
WORKDIR /usr/src/app
COPY --from=server-build /usr/src/app/server-build/target/quarkus-app/. .
RUN mkdir -p webui
COPY --from=webui-build /usr/src/app/webui-build/dist/. ./webui
ENV dhfs_webui_root=/usr/src/app/webui
COPY ./dockerentry.sh .
RUN ["chmod", "+x", "./dockerentry.sh"]
CMD [ "./dockerentry.sh" ]


@@ -1,24 +0,0 @@
FROM azul/zulu-openjdk:21-jre-headless
RUN apt update && apt install -y libfuse2 && apt-get clean
WORKDIR /usr/src/app
COPY ./dhfs-package-downloaded/lib .
COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .
WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .
WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .
ENV dhfs_webui_root=/usr/src/app/webui
WORKDIR /usr/src/app
COPY ./dockerentry.sh .
RUN ["chmod", "+x", "./dockerentry.sh"]
CMD [ "./dockerentry.sh" ]


@@ -1,4 +1,6 @@
# Distributed Home File System 🚧
# Distributed Home File System
[Javadocs](https://usatiuk.github.io/dhfs/)
## What is this?
@@ -11,9 +13,78 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
[Download latest build](https://nightly.link/usatiuk/dhfs/workflows/server/main/Run%20wrapper.zip)
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)
This is a simple set of scripts that allows you to run/stop
the DHFS server in the background, and update it.
## How to use it and how it works?
Once unpacked, in the root folder (`dhfs`), there will be 3 folders:
TODO 😁
- `app` contains the application
- `data` contains the filesystem data storage
- `fuse` is the default filesystem mount point (except on Windows, where the default mount drive letter is `Z`)
Note that on Windows, the path to the root cannot contain spaces. A sketch of the unpacked layout follows below.
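For orientation, the unpacked directory layout looks roughly like this (a sketch based on the folder list above):

```
dhfs/
├── app/    # the application and the run/stop/update scripts
├── data/   # filesystem data storage
└── fuse/   # default filesystem mount point (unused on Windows)
```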
## How to use it?
### General prerequisites
Java 21 is required; it should be available as `java` in `PATH`, or via a correctly set `JAVA_HOME` (ignored on Windows).
The FUSE 2 userspace library should also be available:
- On Ubuntu, the `libfuse2` package can be installed (or an analogous package on other distributions).
- On Windows, [WinFsp](https://winfsp.dev/) should be installed.
- On macOS, [macFUSE](https://macfuse.github.io/).
### How to run it?
In the run-wrapper `app` folder, 3 scripts are available (see the usage sketch below):
- the `run` script starts the filesystem
- the `stop` script stops it
- the `update` script updates the filesystem to the newest available CI build
On Windows, the PowerShell versions of the scripts should be used. For them to work, it might be necessary to allow the execution of unsigned scripts using `Set-ExecutionPolicy Unrestricted`.
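On Linux or macOS, a typical session might look like this (a minimal sketch, assuming the scripts are executable under exactly the names listed above):

```
cd dhfs/app   # the run-wrapper app folder
./run         # start the filesystem in the background
./stop        # stop it
./update      # update to the newest available CI build
```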
### Additional options
Additional options for the filesystem can be specified in the `extra-opts` file in the same directory as the run scripts.
Each line in the `extra-opts` file corresponds to one option passed to the JVM when starting the filesystem; an example file is shown after the list below.
Some extra possible configuration options are:
- `-Ddhfs.fuse.root=` specifies the root where the filesystem should be mounted. By default, it is the `fuse` path under the run-wrapper root. On Windows, it should be a disk root, by default `Z:\`.
- `-Ddhfs.objects.last-seen.timeout=` specifies the period of time (in seconds) after which unavailable peers will be ignored for garbage collection and resynchronized after reconnecting. The default is 43200 (30 days); if set to `-1`, this feature is disabled.
- `-Ddhfs.objects.autosync.download-all=` specifies whether all objects (files and their data) should be downloaded to this peer. `true` or `false`; the default is `false`.
- `-Ddhfs.objects.peerdiscovery.port=` port to broadcast on and listen to for LAN peer discovery (default is `42262`)
- `-Ddhfs.objects.peerdiscovery.broadcast=` whether to enable local peer discovery or not (default is `true`)
- `-Dquarkus.http.port=` HTTP port to listen on (default is `8080`)
- `-Dquarkus.http.ssl-port=` HTTPS port to listen on (default is `8443`)
- `-Dquarkus.http.host=` IP address to listen on (default is `0.0.0.0`)
- `-Ddhfs.peerdiscovery.static-peers=` allows manually specifying a peer's address in the format `peer id:address:http port:https port`, for example `-Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011`
On Windows, the entire space for the filesystem should also be preallocated; the `-Ddhfs.objects.persistence.lmdb.size=` option controls the size (the value is in bytes), and the Windows default is 100 GB.
In case of errors, check `quarkus.log` in the `app` folder, to which the standard output is redirected; on Windows, the error output is kept separate.
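As an illustration, a hypothetical `extra-opts` file combining some of the options above might look like this (one JVM option per line; the mount root and values are made-up examples):

```
-Ddhfs.fuse.root=/home/user/dhfs/fuse
-Ddhfs.objects.autosync.download-all=true
-Ddhfs.objects.peerdiscovery.port=42262
-Dquarkus.http.port=8080
```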
### How to connect to other peers?
A web interface will be available at `localhost:8080` (or whatever the HTTP port is), which can be used to connect with other peers. Peers on the local network should be discovered automatically and available to connect to.
## Other notes
### Running tests
To run the LazyFS tests, LazyFS needs to be built: the git submodules need to be cloned and the `./thirdparty/lazyfs/build.sh` script needs to be run, as sketched below.
LazyFS tests were only tested on Linux.
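A minimal sequence for a fresh checkout might be the following (inferred from the CI workflow earlier in this diff; the Maven invocation mirrors the one CI uses):

```
git submodule update --init --recursive   # clone the git submodules
./thirdparty/lazyfs/build.sh              # build LazyFS
cd dhfs-parent
mvn --batch-mode --update-snapshots package verify   # build and run the tests
```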
### Notice
This software was developed with the support of the Faculty of Information Technology, Czech Technical University in Prague, [fit.cvut.cz](https://fit.cvut.cz).
<img src="./docs/logo-fit-en-cerna.svg" height="64">


@@ -41,3 +41,5 @@ nb-configuration.xml
# Plugin directory
/.quarkus/cli/plugins/
.jqwik-database


@@ -1,17 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
<module name="server"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx512M -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>


@@ -1,18 +1,16 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication"
nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main"/>
<module name="server"/>
<option name="VM_PARAMETERS"
value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021"/>
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*"/>
<option name="ENABLED" value="true"/>
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true"/>
</method>
</configuration>
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsfuse.Main" />
<module name="dhfs-fuse" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseZGC -XX:+ZGenerational --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx1G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
</component>


@@ -1,60 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-deployment</artifactId>
<name>Autoprotomap - Deployment</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc-deployment</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-internal</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc-deployment</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -1,78 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;
class AutoprotomapProcessor {
@BuildStep
ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
var ret = new ProtoIndexBuildItem();
var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
for (var a : annot) {
var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
// if (!messageImplementors.contains(protoTarget))
// throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
ret.protoMsgToObj.put(protoTarget, a.target().asClass());
}
return ret;
}
@BuildStep
void generateProtoSerializer(ApplicationIndexBuildItem jandex,
ProtoIndexBuildItem protoIndex,
BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
try {
for (var o : protoIndex.protoMsgToObj.entrySet()) {
System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);
var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);
try (ClassCreator classCreator = ClassCreator.builder()
.className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
.signature(SignatureBuilder.forClass().addInterface(type))
.classOutput(gizmoAdapter)
.setFinal(true)
.build()) {
classCreator.addAnnotation(Singleton.class);
var generator = new ProtoSerializerGenerator(
jandex.getIndex(),
protoIndex,
classCreator,
msgJType,
objJType
);
generator.generate();
}
}
} catch (Throwable e) {
StringBuilder sb = new StringBuilder();
sb.append(e + "\n");
for (var el : e.getStackTrace()) {
sb.append(el.toString() + "\n");
}
System.out.println(sb);
}
}
}


@@ -1,18 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
public class Constants {
public static final String FIELD_PREFIX = "_";
public static String capitalize(String str) {
return str.substring(0, 1).toUpperCase() + str.substring(1);
}
public static String stripPrefix(String str, String prefix) {
if (str.startsWith(prefix)) {
return str.substring(prefix.length());
}
return str;
}
}


@@ -1,6 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
@FunctionalInterface
public interface Effect {
void apply();
}


@@ -1,10 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;
public final class ProtoIndexBuildItem extends SimpleBuildItem {
BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}


@@ -1,342 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.*;
import org.jboss.jandex.Type;
import org.objectweb.asm.Opcodes;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.usatiuk.autoprotomap.deployment.Constants.*;
public class ProtoSerializerGenerator {
private final Index index;
private final ProtoIndexBuildItem protoIndex;
private final ClassCreator classCreator;
private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
private final Type topMessageType;
private final Type topObjectType;
public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
this.index = index;
this.protoIndex = protoIndex;
this.classCreator = classCreator;
this.topMessageType = topMessageType;
this.topObjectType = topObjectType;
}
private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var sig = SignatureBuilder.forField().setType(type).build();
var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
var fc = classCreator.getFieldCreator(fd);
fc.addAnnotation(Inject.class);
fc.setSignature(sig);
fc.setModifiers(Opcodes.ACC_PUBLIC);
}
return fd;
}
private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
var cur = klass;
while (true) {
visitor.accept(cur);
var next = cur.superClassType().name();
if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
cur = index.getClassByName(next);
}
}
private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
ArrayList<FieldInfo> ret = new ArrayList<>();
traverseHierarchy(index, klass, cur -> {
ret.addAll(cur.fields());
});
return ret;
}
private void generateBuilderUse(BytecodeCreator bytecodeCreator,
ResultHandle builder,
Type messageType, Type objectType,
ResultHandle object) {
var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);
var objectClass = index.getClassByName(objectType.name().toString());
Function<String, String> getterGetter = objectClass.isRecord()
? Function.identity()
: s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
for (var f : findAllFields(index, objectClass)) {
var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);
Supplier<ResultHandle> get = () -> {
if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
return bytecodeCreator.readInstanceField(f, object);
else {
var fieldGetter = getterGetter.apply(f.name());
return bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
}
};
Effect doSimpleCopy = () -> {
var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
builderType.name().toString(), f.type().toString());
var val = get.get();
bytecodeCreator.invokeVirtualMethod(setter, builder, val);
};
switch (f.type().kind()) {
case CLASS -> {
if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
doSimpleCopy.apply();
} else {
var builderGetter = "get" + capitalize(f.name()) + "Builder";
var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);
var val = get.get();
generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
}
}
case PRIMITIVE -> {
doSimpleCopy.apply();
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + f.type());
}
}
}
private ResultHandle generateConstructorUse(
BytecodeCreator bytecodeCreator,
ClassCreator classCreator,
Type messageType, Type objectType,
ResultHandle message
) {
var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
if (constructor == null) {
throw new IllegalStateException("No constructor found for type: " + objectType.name());
}
var argMap = new ResultHandle[constructor.parametersCount()];
for (int i = 0; i < argMap.length; i++) {
var type = constructor.parameterType(i);
var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);
IntConsumer doSimpleCopy = (arg) -> {
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
type.name().toString());
argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
};
switch (type.kind()) {
case CLASS -> {
if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
doSimpleCopy.accept(i);
} else {
var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
nestedProtoType.name().toString());
var nested = bytecodeCreator.invokeVirtualMethod(call, message);
argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
}
}
case PRIMITIVE -> {
doSimpleCopy.accept(i);
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + type);
}
}
return bytecodeCreator.newInstance(constructor, argMap);
}
private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
ArrayList<FieldInfo> fields = findAllFields(index, klass);
var fieldCount = fields.size();
var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));
for (var m : klass.constructors()) {
if (m.parametersCount() != fieldCount) continue;
var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
if (!Objects.equals(fieldNames, parameterNames)) continue;
for (var p : m.parameters()) {
if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) continue;
}
return m;
}
return null;
}
public void generateAbstract() {
var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
index.getAllKnownImplementors(topObjectType.name()).stream())
.filter(k -> !k.isAbstract() && !k.isInterface()).toList();
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));
try (var branch = statement.trueBranch()) {
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
var serialized = branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"serialize", Message.class, Object.class),
serializerLoaded, arg);
branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
} else {
var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
nestedBuilderType.name().toString()), builder);
generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
}
var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
branch.returnValue(result);
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);
var statement = method.ifTrue(typeCheck);
try (var branch = statement.trueBranch()) {
var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
branch.returnValue(branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"deserialize", Object.class, Message.class),
serializerLoaded, nestedMessage));
} else {
branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
}
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
}
public void generate() {
var objInfo = index.getClassByName(topObjectType.name());
if (objInfo.isAbstract() || objInfo.isInterface()) {
generateAbstract();
return;
}
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
generateBuilderUse(method, builder, topMessageType, topObjectType, arg);
var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
method.returnValue(result);
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
}
}
}


@@ -1,22 +0,0 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapDevModeTest {
// Start hot reload (DevMode) test with your extension loaded
@RegisterExtension
static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnDevModeTest() {
// Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
}
}

View File

@@ -1,22 +0,0 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapTest {
// Start unit test with your extension loaded
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnUnitTest() {
// Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
}
}

View File

@@ -1,107 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-integration-tests</artifactId>
<name>Autoprotomap - Integration Tests</name>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<systemPropertyVariables>
<native.image.path>${project.build.directory}/${project.build.finalName}-runner
</native.image.path>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
<maven.home>${maven.home}</maven.home>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>native</id>
<activation>
<property>
<name>native</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>${native.surefire.skip}</skipTests>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<skipITs>false</skipITs>
<quarkus.native.enabled>true</quarkus.native.enabled>
</properties>
</profile>
</profiles>
</project>

View File

@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}

View File

@@ -1,10 +0,0 @@
package com.usatiuk.autoprotomap.it;
import lombok.AllArgsConstructor;
import lombok.Getter;
@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
public int testNum = 0;
}

View File

@@ -1,17 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;
@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
@Override
public CustomObject deserialize(CustomObjectProto message) {
return new CustomObject(2);
}
@Override
public CustomObjectProto serialize(CustomObject object) {
return CustomObjectProto.newBuilder().setTest(1).build();
}
}
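(Note how this ties into the generator above: because CustomObject already has its own ProtoSerializer bean, the generated AbstractProto serializer takes the doExternalCall branch and delegates to this class instead of synthesizing field-mapping code — which is why the round-trip test further down expects the hard-coded 1 and 2 rather than the constructor argument.)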

View File

@@ -1,8 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
String key();
}

View File

@@ -1,15 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
public SimpleObject object;
public String _nestedName;
public ByteString _nestedSomeBytes;
}

View File

@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}

View File

@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}

View File

@@ -1,15 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
public int numfield = 0;
private String name;
public ByteString someBytes;
}

View File

@@ -1,47 +0,0 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";
package autoprotomap.test;
message SimpleObjectProto {
int32 numfield = 1;
string name = 2;
bytes someBytes = 3;
}
message NestedObjectProto {
SimpleObjectProto object = 1;
string nestedName = 2;
bytes nestedSomeBytes = 3;
}
message CustomObjectProto {
int64 test = 1;
}
message AbstractProto {
oneof obj {
NestedObjectProto nestedObject = 1;
SimpleObjectProto simpleObject = 2;
CustomObjectProto customObject = 3;
}
}
message RecordObjectProto {
string key = 1;
}
message RecordObject2Proto {
string key = 1;
int32 value = 2;
}
message InterfaceObjectProto {
oneof obj {
RecordObjectProto recordObject = 1;
RecordObject2Proto recordObject2 = 2;
}
}
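(The oneof blocks above are what drive the abstract/interface mapping: the generated deserializer probes each case with the has*/get* pairs, exactly as in the bytecode generator shown earlier. A hand-written sketch of the dispatch for AbstractProto, with the injected per-case serializers assumed:)

public AbstractObject deserialize(AbstractProto message) {
    if (message.hasNestedObject())
        return nestedObjectSerializer.deserialize(message.getNestedObject());
    if (message.hasSimpleObject())
        return simpleObjectSerializer.deserialize(message.getSimpleObject());
    if (message.hasCustomObject())
        return customObjectSerializer.deserialize(message.getCustomObject());
    throw new IllegalArgumentException("Unknown object type");
}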

View File

@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import io.quarkus.test.junit.QuarkusIntegrationTest;
@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}

View File

@@ -1,113 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
@QuarkusTest
public class AutoprotomapResourceTest {
@Inject
ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
@Inject
ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
@Inject
ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
@Inject
ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;
@Test
public void testSimple() {
var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getNumfield());
Assertions.assertEquals("simple test", ret.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());
var des = simpleProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testNested() {
var ret = nestedProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());
var des = nestedProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testAbstractSimple() {
var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());
var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testAbstractCustom() {
var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
Assertions.assertEquals(1, ret.getCustomObject().getTest());
var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(2, des.getTestNum());
}
@Test
public void testAbstractNested() {
var ret = abstractProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());
var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testInterface() {
var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
Assertions.assertEquals("record test", ret.getRecordObject().getKey());
var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
Assertions.assertEquals("record test", des.key());
var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
Assertions.assertEquals("record test 2", des2.key());
Assertions.assertEquals(1234, des2.value());
}
}

View File

@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Autoprotomap - Parent</name>
<modules>
<module>deployment</module>
<module>runtime</module>
<module>integration-tests</module>
</modules>
</project>

View File

@@ -1,63 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap</artifactId>
<name>Autoprotomap - Runtime</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>extension-descriptor</goal>
</goals>
<configuration>
<deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
</deployment>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,12 +0,0 @@
package com.usatiuk.autoprotomap.runtime;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
Class<?> value() default Object.class;
}

View File

@@ -1,9 +0,0 @@
name: Autoprotomap
#description: Do something useful.
metadata:
# keywords:
# - autoprotomap
# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
# categories:
# - "miscellaneous"
# status: "preview"

dhfs-parent/dhfs-fs/pom.xml Normal file
View File

@@ -0,0 +1,127 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,18 @@
package com.usatiuk.dhfsfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
/**
* ChunkData is a data structure that represents an immutable binary blob.
* @param key unique key
* @param data binary data
*/
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}

View File

@@ -1,14 +1,23 @@
package com.usatiuk.dhfs.files.objects;
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.repository.JDataRemoteDto;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.Set;
/**
* File is a data structure that represents a file in the file system
*
* @param key unique key
* @param mode file mode
* @param cTime inode modification time
* @param mTime modification time
* @param symlink true if the file is a symlink, false otherwise
*/
public record File(JObjectKey key, long mode, long cTime, long mTime,
boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
@@ -28,6 +37,14 @@ public record File(JObjectKey key, long mode, long cTime, long mTime,
return new File(key, mode, cTime, mTime, symlink);
}
public File withCurrentMTime() {
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
}
public File withCurrentCTime() {
return new File(key, mode, System.currentTimeMillis(), mTime, symlink);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();

View File

@@ -0,0 +1,20 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
/**
* FileDto is a data transfer object that contains a file and its chunks.
* @param file the file
* @param chunks the list of chunks, each represented as a pair of a long and a JObjectKey
*/
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}

View File

@@ -1,10 +1,13 @@
package com.usatiuk.dhfs.files.objects;
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.repository.syncmap.DtoMapper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
/**
* Maps a {@link File} object to a {@link FileDto} object and vice versa.
*/
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
@Inject

View File

@@ -1,8 +1,8 @@
package com.usatiuk.dhfs.files.objects;
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
@@ -10,11 +10,20 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;
/**
* Helper class for working with files.
*/
@ApplicationScoped
public class FileHelper {
@Inject
JMapHelper jMapHelper;
/**
* Get the chunks of a file.
* A transaction is expected to already be started.
* @param file the file to get chunks from
* @return a list of pairs of chunk offset and chunk key
*/
public List<Pair<Long, JObjectKey>> getChunks(File file) {
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
try (var it = jMapHelper.getIterator(file)) {
@@ -26,6 +35,13 @@ public class FileHelper {
return List.copyOf(chunks);
}
/**
* Replace the chunks of a file.
* All previous chunks will be deleted.
* A transaction is expected to already be started.
* @param file the file to replace chunks in
* @param chunks the list of pairs of chunk offset and chunk key
*/
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
jMapHelper.deleteAll(file);

View File

@@ -1,20 +1,14 @@
package com.usatiuk.dhfs.files.objects;
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.PeerId;
import com.usatiuk.dhfs.RemoteObjectDataWrapper;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.repository.ObjSyncHandler;
import com.usatiuk.dhfs.repository.PersistentPeerDataService;
import com.usatiuk.dhfs.repository.SyncHelper;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.Transaction;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -29,6 +23,9 @@ import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
/**
* Handles synchronization of file objects.
*/
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
@@ -47,14 +44,18 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
private JKleppmannTreeManager.JKleppmannTree getTree() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
}
/**
* Resolve a conflict between two file versions, updating the file in storage and creating a conflict file.
*
* @param from the peer that sent the update
* @param key the key of the file
* @param receivedChangelog the changelog of the received file
* @param receivedData the received file data
*/
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElse(null);
@@ -136,12 +137,12 @@ public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
do {
try {
getTreeW().move(parent.getRight(),
getTree().move(parent.getRight(),
new JKleppmannTreeNodeMetaFile(
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
newFile.key()
),
getTreeW().getNewNodeId()
getTree().getNewNodeId()
);
} catch (AlreadyExistsException aex) {
i++;

View File

@@ -0,0 +1,22 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaDirectory is a record that represents a directory in the JKleppmann tree.
* @param name the name of the directory
*/
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}

View File

@@ -0,0 +1,24 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
/**
* JKleppmannTreeNodeMetaFile is a record that represents a file in the JKleppmann tree.
* @param name the name of the file
* @param fileIno a reference to the `File` object
*/
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}

View File

@@ -1,27 +1,28 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.JDataRemote;
import com.usatiuk.dhfs.RemoteObjectMeta;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.stores.ObjectPersistentStore;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -33,85 +34,83 @@ import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.StreamSupport;
/**
* Actual filesystem implementation.
*/
@ApplicationScoped
public class DhfsFileServiceImpl implements DhfsFileService {
public class DhfsFileService {
@ConfigProperty(name = "dhfs.files.target_chunk_alignment", defaultValue = "17")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
int maxChunkSize;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@Inject
Transaction curTx;
@Inject
RemoteTransaction remoteTx;
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@ConfigProperty(name = "dhfs.files.allow_recursive_delete")
boolean allowRecursiveDelete;
@ConfigProperty(name = "dhfs.objects.ref_verification")
boolean refVerification;
@ConfigProperty(name = "dhfs.objects.write_log")
boolean writeLogging;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
JMapHelper jMapHelper;
@Inject
ObjectPersistentStore objectPersistentStore;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC);
private JKleppmannTreeManager.JKleppmannTree getTree() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
}
/**
* Create a new chunk with the given data and a new unique ID.
*
* @param bytes the data to store in the chunk
* @return the created chunk
*/
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putData(newChunk);
remoteTx.putDataNew(newChunk);
return newChunk;
}
int targetChunkSize() {
return 1 << targetChunkAlignment;
}
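(With the default dhfs.files.target_chunk_alignment of 17 this gives 1 << 17 = 131072-byte (128 KiB) chunks; a negative alignment disables splitting entirely, and dhfs.files.max_chunk_size, 524288 by default, lets the write path below emit a remainder smaller than that as one single chunk.)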
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTreeW();
getTree();
}
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
private JKleppmannTreeNode getDirEntry(String name) {
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNode.class, res);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
return ret;
}
@Override
/**
* Get the attributes of a file or directory.
*
* @param uuid the UUID of the file or directory
* @return the attributes of the file or directory, or an empty optional if it does not exist
*/
public Optional<GetattrRes> getattr(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var ref = curTx.get(JData.class, uuid).orElse(null);
@@ -124,7 +123,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNode) {
} else if (ref instanceof JKleppmannTreeNodeHolder) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
@@ -133,13 +132,18 @@ public class DhfsFileServiceImpl implements DhfsFileService {
});
}
@Override
/**
* Try to resolve a path to a file or directory.
*
* @param name the path to resolve
* @return the key of the file or directory, or an empty optional if it does not exist
*/
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntryR(name);
var ret = getDirEntry(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
@@ -157,11 +161,17 @@ public class DhfsFileServiceImpl implements DhfsFileService {
throw new StatusRuntimeExceptionNoStacktrace(Status.INVALID_ARGUMENT.withDescription("Not a directory: " + entry.key()));
}
@Override
/**
* Create a new file with the given name and mode.
*
* @param name the name of the file
* @param mode the mode of the file
* @return the key of the created file
*/
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
var parent = getDirEntry(path.getParent().toString());
ensureDir(parent);
@@ -173,7 +183,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
remoteTx.putData(f);
try {
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
@@ -182,71 +192,101 @@ public class DhfsFileServiceImpl implements DhfsFileService {
});
}
//FIXME: Slow..
@Override
/**
* Get the parent directory of a file or directory.
*
* @param ino the key of the file or directory
* @return a pair of the file's name and the key of its parent directory
*/
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTreeW().findParent(w -> {
// FIXME: Slow
return getTree().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.getFileIno().equals(ino);
return f.fileIno().equals(ino);
return false;
});
});
}
@Override
/**
* Create a new directory with the given name and mode.
*
* @param name the name of the directory
* @param mode the mode of the directory
*/
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntryW(path.getParent().toString());
var parent = getDirEntry(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
// TODO: No modes for directories yet
getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
});
}
@Override
/**
* Unlink a file or directory.
*
* @param name the name of the file or directory
* @throws DirectoryNotEmptyException if the directory is not empty and recursive delete is not allowed
*/
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node == null)
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory f) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTreeW().trash(node.meta(), node.key());
getTree().trash(node.meta(), node.key());
});
}
@Override
public Boolean rename(String from, String to) {
/**
* Rename a file or directory.
*
* @param from the old name
* @param to the new name
* @return true if the rename was successful, false otherwise
*/
public boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntryW(from);
var node = getDirEntry(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntryW(toPath.getParent().toString());
var toDentry = getDirEntry(toPath.getParent().toString());
ensureDir(toDentry);
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
@Override
public Boolean chmod(JObjectKey uuid, long mode) {
/**
* Change the mode of a file or directory.
*
* @param uuid the ID of the file or directory
* @param mode the new mode
* @return true if the mode was changed successfully, false otherwise
*/
public boolean chmod(JObjectKey uuid, long mode) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNode) {
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
remoteTx.putData(f.withMode(mode).withCurrentCTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
@@ -257,10 +297,15 @@ public class DhfsFileServiceImpl implements DhfsFileService {
});
}
@Override
/**
* Read the contents of a directory.
*
* @param name the path of the directory
* @return an iterable of the names of the files in the directory
*/
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntryW(name);
var found = getDirEntry(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
@@ -269,8 +314,15 @@ public class DhfsFileServiceImpl implements DhfsFileService {
});
}
@Override
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
/**
* Read the contents of a file.
*
* @param fileUuid the ID of the file
* @param offset the offset to start reading from
* @param length the number of bytes to read
* @return the contents of the file as a ByteString
*/
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
@@ -280,12 +332,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return Optional.empty();
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return Optional.of(ByteString.empty());
return ByteString.empty();
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
@@ -323,14 +375,20 @@ public class DhfsFileServiceImpl implements DhfsFileService {
chunk = it.next();
}
return Optional.of(buf);
return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
return Optional.empty();
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
/**
* Read the contents of a chunk.
*
* @param uuid the ID of the chunk
* @return the chunk data as a ByteString
*/
private ByteString readChunk(JObjectKey uuid) {
var chunkRead = remoteTx.getData(ChunkData.class, uuid).orElse(null);
@@ -342,6 +400,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return chunkRead.data();
}
/**
* Get the size of a chunk.
*
* @param uuid the ID of the chunk
* @return the size of the chunk
*/
private int getChunkSize(JObjectKey uuid) {
return readChunk(uuid).size();
}
@@ -350,33 +414,30 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return num & -(1L << n);
}
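(alignDown clears the low n bits, snapping an offset down to the containing chunk boundary — e.g. alignDown(300000, 17) == 262144, since -(1L << 17) masks off the low 17 bits and 300000 lies in the chunk starting at 2 * 131072.)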
@Override
public Long write(JObjectKey fileUuid, long offset, ByteString data) {
/**
* Write data to a file.
*
* @param fileUuid the ID of the file
* @param offset the offset to write to
* @param data the data to write
* @return the number of bytes written
*/
public long write(JObjectKey fileUuid, long offset, ByteString data) {
return jObjectTxManager.executeTx(() -> {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return -1L;
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
}
if (writeLogging) {
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
+ offset + " " + data.size());
}
if (size(fileUuid) < offset) {
truncate(fileUuid, offset);
file = remoteTx.getData(File.class, fileUuid).orElse(null);
}
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
Map<Long, JObjectKey> removedChunks = new HashMap<>();
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
@@ -385,8 +446,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = curChunkStart + getChunkSize(curChunkId);
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;
removedChunks.put(curEntry.getKey().key(), curChunkId);
@@ -408,22 +469,34 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
}
Map<Long, JObjectKey> newChunks = new HashMap<>();
if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
{
int targetChunkSize = 1 << targetChunkAlignment;
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkAlignment < 0)
if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
end = Math.min(cur + targetChunkSize(), combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
@@ -436,23 +509,29 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
remoteTx.putData(file.withCurrentMTime());
return (long) data.size();
});
}
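(Concretely, with the defaults above — alignment 17, max_chunk_size 524288: a 600 KiB write at offset 0 is cut into one aligned 131072-byte chunk followed by a single 483328-byte tail chunk, because once the remainder drops below max_chunk_size the loop emits it whole; a 300 KiB write starts below that bound and is stored as one chunk outright.)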
@Override
public Boolean truncate(JObjectKey fileUuid, long length) {
/**
* Truncate a file to the given length.
*
* @param fileUuid the ID of the file
* @param length the new length of the file
* @return true if the truncate was successful, false otherwise
*/
public boolean truncate(JObjectKey fileUuid, long length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
@@ -476,38 +555,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
long combinedSize = (length - curSize);
long start = curSize;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
@@ -558,40 +606,98 @@ public class DhfsFileServiceImpl implements DhfsFileService {
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
remoteTx.putData(file.withCurrentMTime());
return true;
});
}
@Override
/**
* Fill the given range with zeroes.
*
* @param fillStart the start of the range
* @param length the end offset of the range (exclusive)
* @param newChunks the map to store the new chunks in
*/
private void fillZeros(long fillStart, long length, Map<Long, JObjectKey> newChunks) {
long combinedSize = (length - fillStart);
long start = fillStart;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize() <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize() * 1.5)) {
end = cur + targetChunkSize();
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
}
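(The zeroCache means only one ChunkData is actually created per distinct zero-chunk size — full-size chunks plus at most one shorter tail — so filling an arbitrarily large hole allocates at most two zero blobs, however many map entries it produces.)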
/**
* Read the contents of a symlink.
*
* @param uuid the ID of the symlink
* @return the contents of the symlink as a string
*/
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
return readlinkBS(uuid).toStringUtf8();
});
}
@Override
/**
* Read the contents of a symlink as a ByteString.
*
* @param uuid the ID of the symlink
* @return the contents of the symlink as a ByteString
*/
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}
@Override
/**
* Create a symlink.
*
* @param oldpath the target of the symlink
* @param newpath the path of the symlink
* @return the key of the created symlink
*/
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntryW(path.getParent().toString());
var parent = getDirEntry(path.getParent().toString());
ensureDir(parent);
@@ -605,23 +711,29 @@ public class DhfsFileServiceImpl implements DhfsFileService {
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
return f.key();
});
}
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
/**
* Set the modification time of a file.
*
* @param fileUuid the ID of the file
* @param mtimeMs the modification time in milliseconds
* @return true if the times were set successfully, false otherwise
*/
public boolean setTimes(JObjectKey fileUuid, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
// FIXME:
if (dent instanceof JKleppmannTreeNode) {
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
remoteTx.putData(f.withCTime(System.currentTimeMillis()).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
@@ -632,7 +744,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
});
}
@Override
/**
* Get the size of a file.
*
* @param fileUuid the ID of the file
* @return the size of the file
*/
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
@@ -651,4 +768,34 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return realSize;
});
}
/**
* Write data to a file.
*
* @param fileUuid the ID of the file
* @param offset the offset to write to
* @param data the data to write
* @return the number of bytes written
*/
public long write(JObjectKey fileUuid, long offset, byte[] data) {
return write(fileUuid, offset, UnsafeByteOperations.unsafeWrap(data));
}
/**
* Get the free space on the filesystem.
*
* @return the free space in bytes
*/
public long getFreeSpace() {
return objectPersistentStore.getFreeSpace();
}
/**
* Get the total space on the filesystem.
*
* @return the total space in bytes
*/
public long getTotalSpace() {
return objectPersistentStore.getTotalSpace();
}
}

View File

@@ -0,0 +1,13 @@
package com.usatiuk.dhfsfs.service;
/**
* DirectoryNotEmptyException is thrown when trying to delete a directory that is not empty.
* This exception is used to indicate that a directory cannot be deleted
* because it contains files or subdirectories.
*/
public class DirectoryNotEmptyException extends RuntimeException {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}
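(Returning this from fillInStackTrace() skips stack-trace capture — the usual Java idiom for exceptions used as control flow; unlink() above throws this on every attempt to remove a non-empty directory, so keeping it cheap matters.)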

View File

@@ -0,0 +1,11 @@
package com.usatiuk.dhfsfs.service;
/**
* GetattrRes is a record that represents the result of a getattr operation.
* @param mtime File modification time
* @param ctime File inode change time
* @param mode File mode
* @param type File type
*/
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
public enum GetattrType {
FILE,

View File

@@ -1,7 +1,5 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
@@ -14,8 +12,6 @@ dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,9 +1,8 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import com.usatiuk.dhfs.RemoteTransaction;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
@@ -90,7 +89,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
// for (int start = 0; start < all.length(); start++) {
// for (int end = start; end <= all.length(); end++) {
// var read = fileService.read(fuuid.toString(), start, end - start);
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.toByteArray());
// }
// }
// }
@@ -111,17 +110,21 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
var curMtime = fileService.getattr(uuid).get().mtime();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).toByteArray());
fileService.write(uuid, 4, new byte[]{10, 11, 12});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid, 10, new byte[]{13, 14});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
fileService.write(uuid, 6, new byte[]{15, 16});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
fileService.write(uuid, 3, new byte[]{17, 18});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
var newMtime = fileService.getattr(uuid).get().mtime();
Assertions.assertTrue(newMtime > curMtime);
fileService.unlink("/writeTest");
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
@@ -135,7 +138,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.unlink("/removeTest");
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
@@ -149,12 +152,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
}
@RepeatedTest(100)
@@ -166,11 +169,12 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).toByteArray());
} finally {
fileService.unlink("/truncateTest2");
}
@@ -184,10 +188,10 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 7);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).toByteArray());
}
@Test
@@ -197,14 +201,14 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
Assertions.assertTrue(fileService.open("/movedTest").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/movedTest").get(), 0, 10).toByteArray());
}
@Test
@@ -217,9 +221,9 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid2 = ret2.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).toByteArray());
jObjectTxManager.run(() -> {
@@ -233,7 +237,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).toByteArray());
// await().atMost(5, TimeUnit.SECONDS).until(() -> {
// jObjectTxManager.run(() -> {
@@ -251,8 +255,8 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).toByteArray());
}
@Test
@@ -262,13 +266,13 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
Assertions.assertArrayEquals(new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
}, fileService.read(uuid, 0, 30).get().toByteArray());
}, fileService.read(uuid, 0, 30).toByteArray());
}
@Test
@@ -278,7 +282,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
@@ -293,6 +297,6 @@ public abstract class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/movedTest2").get(), 0, 10).toByteArray());
}
}
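
Most of this hunk just drops `.get()`: `DhfsFileService.read` now appears to return the bytes directly rather than an optional wrapper. Reduced to a before/after sketch (the exact return types are inferred, not shown in the diff):

// Before: read returned a wrapper that had to be unwrapped first.
// assertArrayEquals(expected, fileService.read(uuid, 0, 10).get().toByteArray());
// After: read returns the byte string directly.
// assertArrayEquals(expected, fileService.read(uuid, 0, 10).toByteArray());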

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -0,0 +1,29 @@
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
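
As a usage sketch, a concrete profile would subclass `TempDataProfile` and layer extra settings over the generated temp-dir paths (the subclass name and property value here are hypothetical):

// Hypothetical concrete test profile; TempDataProfile supplies the temp-dir roots.
public class NoDeletionDelayProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> toPut) {
        toPut.put("dhfs.objects.deletion.delay", "0"); // extra per-test override
    }
}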

View File

@@ -0,0 +1,40 @@
package com.usatiuk.dhfsfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
}
}
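
`purgeDirectory` recurses manually and ignores `File.delete()` failures, which is fine for best-effort test cleanup. An equivalent NIO sketch (an alternative, not what this commit uses; assumes the usual `java.nio.file.*` and `java.util.Comparator` imports) would be:

// Alternative cleanup sketch: walk the tree and delete deepest entries first.
static void purgeDirectoryNio(Path dir) throws IOException {
    try (var paths = Files.walk(dir)) {
        paths.sorted(Comparator.reverseOrder())
             .map(Path::toFile)
             .forEach(File::delete); // still best-effort: return value ignored
    }
}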

View File

@@ -5,7 +5,6 @@ dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false

View File

@@ -0,0 +1,5 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*

dhfs-parent/dhfs-fuse/.gitignore vendored Normal file
View File

@@ -0,0 +1,43 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/

View File

@@ -0,0 +1,2 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl

View File

@@ -0,0 +1,43 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010

View File

@@ -0,0 +1,143 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.serceman</groupId>
<artifactId>jnr-fuse</artifactId>
<version>0.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,97 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -0,0 +1,93 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using :
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This scripts computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a container's restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -0,0 +1,27 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -0,0 +1,30 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,13 +1,15 @@
package com.usatiuk.dhfs.fuse;
package com.usatiuk.dhfsfuse;
import com.google.protobuf.UnsafeByteOperations;
import com.kenai.jffi.MemoryIO;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
import com.usatiuk.dhfsfs.service.GetattrRes;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.utils.UnsafeAccessor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -18,16 +20,17 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import jnr.ffi.Struct;
import jnr.ffi.types.off_t;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import ru.serce.jnrfuse.struct.Timespec;
import ru.serce.jnrfuse.struct.*;
import java.nio.ByteBuffer;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Optional;
@@ -36,26 +39,30 @@ import java.util.concurrent.atomic.AtomicLong;
import static jnr.posix.FileStat.*;
/**
* FUSE file system implementation.
*/
@ApplicationScoped
public class DhfsFuse extends FuseStubFS {
private static final int blksize = 1048576;
private static final int iosize = 1048576;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
@ConfigProperty(name = "dhfs.fuse.root")
String root;
@ConfigProperty(name = "dhfs.fuse.enabled")
boolean enabled;
@ConfigProperty(name = "dhfs.fuse.debug")
Boolean debug;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@Inject
JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
@Inject
DhfsFileService fileService;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
/**
* Allocate a handle for the given key.
*
* @param key the key to allocate a handle for
* @return the allocated handle, not 0
*/
private long allocateHandle(JObjectKey key) {
while (true) {
var newFh = _fh.getAndIncrement();
@@ -66,43 +73,61 @@ public class DhfsFuse extends FuseStubFS {
}
}
/**
* Get the key from the handle.
*
* @param handle the handle to get the key from
* @return the key, or null if not found
*/
private JObjectKey getFromHandle(long handle) {
assert handle != 0;
if (handle == 0)
throw new IllegalStateException("Handle is 0");
return _openHandles.get(handle);
}
void init(@Observes @Priority(100000) StartupEvent event) {
if (!enabled) return;
Paths.get(root).toFile().mkdirs();
Log.info("Mounting with root " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
var opts = new ArrayList<String>();
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
if (SystemUtils.IS_OS_WINDOWS) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=-1");
opts.add("-o");
opts.add("gid=-1");
} else {
Paths.get(root).toFile().mkdirs();
if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// opts.add("-o");
// opts.add("large_read");
opts.add("-o");
opts.add("big_writes");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("big_writes");
opts.add("auto_cache");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("uid=" + uid);
opts.add("-o");
opts.add("max_write=" + iosize);
opts.add("gid=" + gid);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("-o");
opts.add("gid=" + gid);
mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
}
@@ -118,13 +143,12 @@ public class DhfsFuse extends FuseStubFS {
try {
stbuf.f_frsize.set(blksize);
stbuf.f_bsize.set(blksize);
// FIXME:
stbuf.f_blocks.set(1024 * 1024 * 1024 / blksize); // total data blocks in file system
stbuf.f_bfree.set(1024 * 1024 * 1024 / blksize); // free blocks in fs
stbuf.f_bavail.set(1024 * 1024 * 1024 / blksize); // avail blocks in fs
stbuf.f_files.set(1000); //FIXME:
stbuf.f_ffree.set(Integer.MAX_VALUE - 2000); //FIXME:
stbuf.f_favail.set(Integer.MAX_VALUE - 2000); //FIXME:
stbuf.f_blocks.set(fileService.getTotalSpace() / blksize); // total data blocks in file system
stbuf.f_bfree.set(fileService.getFreeSpace() / blksize); // free blocks in fs
stbuf.f_bavail.set(fileService.getFreeSpace() / blksize); // avail blocks in fs
stbuf.f_files.set(1000); // TODO: Calculate real file counts?
stbuf.f_ffree.set(Integer.MAX_VALUE - 1000);
stbuf.f_favail.set(Integer.MAX_VALUE - 1000);
stbuf.f_namemax.set(2048);
return super.statfs(path, stbuf);
} catch (Throwable e) {
@@ -160,13 +184,13 @@ public class DhfsFuse extends FuseStubFS {
}
}
// FIXME: Race?
stat.st_ctim.tv_sec.set(found.get().ctime() / 1000);
stat.st_ctim.tv_nsec.set((found.get().ctime() % 1000) * 1000);
stat.st_mtim.tv_sec.set(found.get().mtime() / 1000);
stat.st_mtim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
stat.st_atim.tv_sec.set(found.get().mtime() / 1000);
stat.st_atim.tv_nsec.set((found.get().mtime() % 1000) * 1000);
var atime = Math.max(found.get().ctime(), found.get().mtime());
stat.st_atim.tv_sec.set(atime / 1000);
stat.st_atim.tv_nsec.set((atime % 1000) * 1000000L);
stat.st_blksize.set(blksize);
} catch (Throwable e) {
Log.error("When getattr " + path, e);
@@ -182,8 +206,7 @@ public class DhfsFuse extends FuseStubFS {
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var file = fileOpt.get();
var res = fileService.setTimes(file,
timespec[0].tv_sec.get() * 1000,
timespec[1].tv_sec.get() * 1000);
timespec[1].tv_sec.get() * 1000L + timespec[1].tv_nsec.longValue() / 1000000L);
if (!res) return -ErrorCodes.EINVAL();
else return 0;
} catch (Throwable e) {
@@ -220,8 +243,8 @@ public class DhfsFuse extends FuseStubFS {
var fileKey = getFromHandle(fi.fh.get());
var read = fileService.read(fileKey, offset, (int) size);
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
return read.get().size();
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
return read.size();
} catch (Throwable e) {
Log.error("When reading " + path, e);
return -ErrorCodes.EIO();
@@ -230,24 +253,22 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
var buffer = UninitializedByteBuffer.allocate((int) size);
UnsafeAccessor.UNSAFE.copyMemory(
buf.address(),
UnsafeAccessor.NIO.getBufferAddress(buffer),
size
);
return write(path, buffer, offset, fi);
}
public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileKey = getFromHandle(fi.fh.get());
var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);
if (buffer.isDirect()) {
jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
buf.address(),
jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
size
);
} else {
buf.get(0, buffer.array(), 0, (int) size);
}
var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
return written.intValue();
} catch (Throwable e) {
return Math.toIntExact(written);
} catch (Exception e) {
Log.error("When writing " + path, e);
return -ErrorCodes.EIO();
}
@@ -383,7 +404,7 @@ public class DhfsFuse extends FuseStubFS {
var file = fileOpt.get();
var read = fileService.readlinkBS(fileOpt.get());
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
return 0;
} catch (Throwable e) {
@@ -415,4 +436,29 @@ public class DhfsFuse extends FuseStubFS {
return -ErrorCodes.EIO();
}
}
@Override
public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
int size = (int) libFuse.fuse_buf_size(buf);
FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
try {
tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
FuseBufvec.init(tmpVec, size);
var bb = UninitializedByteBuffer.allocate(size);
var mem = UninitializedByteBuffer.getAddress(bb);
tmpVec.buf.mem.set(mem);
tmpVec.buf.size.set(size);
int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
if (res != size) {
Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
return -ErrorCodes.ENOMEM();
}
return write(path, bb, off, fi);
} finally {
if (tmpVecAddr != 0) {
MemoryIO.getInstance().freeMemory(tmpVecAddr);
}
}
}
}
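
The `utimens` change above folds FUSE's `timespec` (seconds plus nanoseconds) into the millisecond timestamps `setTimes` expects. As a standalone sketch of that arithmetic (method and variable names are illustrative):

// Mirrors the tv_sec * 1000L + tv_nsec / 1000000L expression in utimens.
static long timespecToMillis(long tvSec, long tvNsec) {
    return tvSec * 1000L + tvNsec / 1_000_000L;
}
// e.g. timespecToMillis(1715680000L, 500_000_000L) == 1715680000500L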

View File

@@ -1,22 +1,24 @@
package com.usatiuk.dhfs.fuse;
package com.usatiuk.dhfsfuse;
import com.google.protobuf.ByteOutput;
import com.usatiuk.utils.UnsafeAccessor;
import jnr.ffi.Pointer;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
/**
* JnrPtrByteOutput is a ByteOutput implementation that writes to a `jnr.ffi.Pointer`.
*/
public class JnrPtrByteOutput extends ByteOutput {
private final Pointer _backing;
private final long _size;
private final JnrPtrByteOutputAccessors _accessors;
private long _pos;
public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
public JnrPtrByteOutput(Pointer backing, long size) {
_backing = backing;
_size = size;
_pos = 0;
_accessors = accessors;
}
@Override
@@ -47,9 +49,9 @@ public class JnrPtrByteOutput extends ByteOutput {
if (value instanceof MappedByteBuffer mb) {
mb.load();
}
long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
var out = _backing.address() + _pos;
_accessors.getUnsafe().copyMemory(addr, out, rem);
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
} else {
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
}
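
The hot path above distinguishes direct from heap-backed buffers: direct buffers are copied by raw address, heap buffers through `Pointer.put`. Reduced to a standalone sketch (same logic as in `JnrPtrByteOutput`, illustrative method name):

// Two copy paths into native memory, as in the write method above.
void copyToNative(ByteBuffer value, Pointer backing, long pos, int rem) {
    if (value.isDirect()) {
        long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
        UnsafeAccessor.UNSAFE.copyMemory(addr, backing.address() + pos, rem);
    } else {
        backing.put(pos, value.array(), value.arrayOffset() + value.position(), rem);
    }
}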

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.dhfsfuse;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;

View File

@@ -0,0 +1,24 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.objects.autosync.threads=8
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=8
dhfs.objects.ref-processor.threads=8
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required

View File

@@ -1,6 +1,5 @@
package com.usatiuk.dhfs.fuse;
package com.usatiuk.dhfsfuse;
import com.usatiuk.dhfs.TempDataProfile;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import org.eclipse.microprofile.config.inject.ConfigProperty;

View File

@@ -0,0 +1,29 @@
package com.usatiuk.dhfsfuse;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}

View File

@@ -0,0 +1,40 @@
package com.usatiuk.dhfsfuse;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
public static void purgeDirectory(File dir) {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -10,10 +10,8 @@ import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.IOException;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -32,17 +30,19 @@ public class DhfsFuseIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
Network network = Network.newNetwork();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network);
.withNetwork(network);
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network);
.withNetwork(network);
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
@@ -53,8 +53,11 @@ public class DhfsFuseIT {
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -65,131 +68,121 @@ public class DhfsFuseIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
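// Editor's sketch: the setup above now PUTs an empty {} body to
// /peers-manage/known-peers/<uuid> -- the peer UUID moved from the JSON body
// into the URL path. The same request via java.net.http instead of curl
// (hypothetical helper; host and port as in the test):
private void registerPeer(String uuid) throws Exception {
    var httpClient = java.net.http.HttpClient.newHttpClient();
    var request = java.net.http.HttpRequest.newBuilder()
            .uri(java.net.URI.create("http://localhost:8080/peers-manage/known-peers/" + uuid))
            .header("Content-Type", "application/json")
            .PUT(java.net.http.HttpRequest.BodyPublishers.ofString("{}"))
            .build();
    httpClient.send(request, java.net.http.HttpResponse.BodyHandlers.ofString());
}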
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2);
});
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void createDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /dhfs_test/fuse/testf2").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() ->
"newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
"newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
"newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
}
@Test
void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
client.disconnectFromNetworkCmd().withNetworkId(network.getId()).withContainerId(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
}
// TODO: How this fits with the tree?
@Test
@Disabled
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void deleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
Log.info("Deleted");
// FIXME?
@@ -197,83 +190,121 @@ public class DhfsFuseIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
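
Each step above is the same Awaitility idiom: re-run a shell command inside the container until it succeeds, failing the test after 45 seconds. A hedged sketch of a helper that would condense it (waitForExit0 is a hypothetical name, not defined in this suite):

// Hypothetical helper: poll a shell command in a container until it exits 0.
private static void waitForExit0(GenericContainer<?> c, String cmd) {
    await().atMost(45, TimeUnit.SECONDS)
            .until(() -> 0 == c.execInContainer("/bin/sh", "-c", cmd).getExitCode());
}
// e.g. waitForExit0(container1, "rm /dhfs_test/fuse/testf1");
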
@Test
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withNetworkId(network.getId()).withContainerId(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /dhfs_test/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo tesempty3 > /dhfs_test/fuse/testf3").getExitCode());
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
Log.info("Deleted");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty2\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty3\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf3").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty3\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf3").getStdout()));
}
@Test
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
}
@Test
void moveDirTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testdir/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testdir /dhfs_test/fuse/testdir2/testdirm").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir2/testdirm/testf1").getStdout()));
}
// TODO: This probably shouldn't be working right now
@Test
void removeAddHostTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /dhfs_test/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /dhfs_test/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /dhfs_test/fuse/newfile2").getExitCode());
Log.info("Re-adding");
container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing removeAddHostTest");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
Log.info(cat1);
Log.info(cat2);
Log.info(ls1);
@@ -286,10 +317,10 @@ public class DhfsFuseIT {
@Test
void dirConflictTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"),
Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /dhfs_test/fuse/testf"),
Pair.of(container2, "echo test2 >> /dhfs_test/fuse/testf")).parallel().map(p -> {
try {
return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode();
} catch (Exception e) {
@@ -298,48 +329,75 @@ public class DhfsFuseIT {
}).anyMatch(r -> r != 0);
Assumptions.assumeTrue(!createFail, "Failed creating one or more files");
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
return cat.getStdout().contains("test1") && cat.getStdout().contains("test2");
});
}
@Test
void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
Log.warn("Connected");
checkConsistency();
var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
}
@Test
void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /dhfs_test/fuse/a/testfa").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /dhfs_test/fuse/b/testfb").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;");
var c2ls = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f -exec cat {} \\;");
return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou");
});
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/a /dhfs_test/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/b /dhfs_test/fuse/a").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing dirCycleTest");
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
Log.info(c1ls2);
var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
Log.info(c2ls2);
return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou")
@@ -353,39 +411,41 @@ public class DhfsFuseIT {
void removeAndMove() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.pauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
Log.info("Removing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
client.unpauseContainerCmd(container2.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
// Either removed, or moved
// TODO: it always seems to be removed?
Log.info("Reading both");
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("cat1: " + cat1);
Log.info("cat2: " + cat2);
Log.info("ls1: " + ls1);


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -9,10 +9,8 @@ import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.UUID;
@@ -35,51 +33,53 @@ public class DhfsFusex3IT {
String c2uuid;
String c3uuid;
// This calculation is somewhat racy, so keep it hardcoded for now
long emptyFileCount = 9;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
// TODO: Dedup
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network);
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network);
container3 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network);
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
Log.info(container1.getContainerId() + "=" + c1uuid);
Log.info(container2.getContainerId() + "=" + c2uuid);
Log.info(container3.getContainerId() + "=" + c3uuid);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class))
.withPrefix(1 + "-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class))
.withPrefix(2 + "-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer3 = new WaitingConsumer();
var loggingConsumer3 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFusex3IT.class))
.withPrefix(3 + "-" + testInfo.getDisplayName());
container3.followOutput(loggingConsumer3.andThen(waitingConsumer3));
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Log.info(container1.getContainerId() + "=" + c1uuid + " = 1");
Log.info(container2.getContainerId() + "=" + c2uuid + " = 2");
Log.info(container3.getContainerId() + "=" + c3uuid + " = 3");
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c3uuid));
@@ -91,91 +91,62 @@ public class DhfsFusex3IT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl1 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
var c2curl3 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c3uuid);
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
}
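
Peer registration in setup goes through the management REST API; with this change the peer UUID moves out of the JSON body and into the URL path. A sketch of the call shape used above, wrapped in a hypothetical helper:

// Illustrative wrapper around the curl-based peer registration above.
private static void addKnownPeer(GenericContainer<?> on, String peerUuid) throws Exception {
    on.execInContainer("/bin/sh", "-c",
            "curl --header \"Content-Type: application/json\" " +
                    " --request PUT " +
                    " --data '{}' " +
                    " http://localhost:8080/peers-manage/known-peers/" + peerUuid);
}
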
private boolean checkEmpty() throws IOException, InterruptedException {
for (var container : List.of(container1, container2, container3)) {
var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f");
var foundWc = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f | wc -l");
Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString());
if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount))
return false;
}
return true;
}
@AfterEach
void stop() {
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
}
// FIXME:
@Test
@Disabled
void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
}
@Test
@Disabled
void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /dhfs_test/fuse/hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && gcc hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container1.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container2.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container3.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container3.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
@@ -183,50 +154,51 @@ public class DhfsFusex3IT {
@Test
void removeHostTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
Thread.sleep(10000);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void dirConflictTest() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container1.getContainerId()).exec();
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses are needed, as otherwise docker buffers some incoming packets
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> {
for (var c : List.of(container1, container2, container3)) {
var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3")))
@@ -236,37 +208,38 @@ public class DhfsFusex3IT {
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout());
return container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout());
});
}
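
The comment near the top of this test records a real ordering constraint: a container is paused before it is detached, since Docker can otherwise buffer in-flight packets and deliver them once the network is reattached, weakening the partition. An illustrative fragment mirroring the calls above (not a drop-in replacement for the exact sequencing the test uses):

// Pause first so nothing is buffered for later delivery, then detach.
client.pauseContainerCmd(container1.getContainerId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
// ... let the remaining peers diverge ...
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
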
@Test
void fileConflictTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container1.getContainerId()).exec();
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses are needed, as otherwise docker buffers some incoming packets
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -276,9 +249,9 @@ public class DhfsFusex3IT {
// TODO: There's some issue with cache, so avoid file reads
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency 1");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
Log.info(ls1);
Log.info(ls2);
Log.info(ls3);
@@ -290,8 +263,8 @@ public class DhfsFusex3IT {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing");
for (var c : List.of(container1, container2, container3)) {
var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
if (!(cat.getExitCode() == 0 && ls.getExitCode() == 0))
@@ -304,12 +277,12 @@ public class DhfsFusex3IT {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat3 = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat3 = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);


@@ -1,4 +1,4 @@
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -66,24 +66,31 @@ public class DhfsImage implements Future<String> {
.run("apt update && apt install -y libfuse2 curl gcc")
.copy("/app", "/app")
.copy("/libs", "/libs")
.cmd("java", "-ea", "-Xmx128M",
.cmd("java", "-ea", "-Xmx512M", "-XX:+UseParallelGC",
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
"--add-opens=java.base/java.nio=ALL-UNNAMED",
"--enable-preview",
"-Ddhfs.objects.peerdiscovery.interval=1s",
"-Ddhfs.objects.invalidation.delay=100",
"-Ddhfs.objects.deletion.delay=0",
"-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
"-Ddhfs.objects.ref_verification=true",
"-Ddhfs.objects.write_log=true",
"-Ddhfs.objects.sync.timeout=10",
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Dcom.usatiuk.dhfs.supportlib.native-path=/libs",
"-Ddhfs.objects.last-seen.timeout=30",
"-Ddhfs.objects.last-seen.update=10",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.objects.transaction\".level=INFO",
"-Ddhfs.objects.periodic-push-op-interval=5s",
"-Ddhfs.fuse.root=/dhfs_test/fuse",
"-Ddhfs.objects.persistence.files.root=/dhfs_test/data",
"-Ddhfs.objects.persistence.stuff.root=/dhfs_test/data/stuff",
"-jar", "/app/quarkus-run.jar")
.run("mkdir /dhfs_test && chmod 777 /dhfs_test")
.build())
.withFileFromPath("/app", Paths.get(buildPath, "quarkus-app"))
.withFileFromPath("/libs", Paths.get(nativeLibsDirectory));


@@ -0,0 +1,238 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class KillIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
Network network;
ExecutorService executor;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@Test
void killTest(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTest2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
}

View File

@@ -0,0 +1,215 @@
package com.usatiuk.dhfsfuse.integration;
import io.quarkus.logging.Log;
import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class LazyFs {
private static final String lazyFsPath;
static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}
private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;
try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}
private String fifoPath() {
return fifoFile.getAbsolutePath();
}
public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}
var argList = new ArrayList<String>();
argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}
startLatch = new CountDownLatch(1);
outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;
while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;
while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();
try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}
public void start() {
start("");
}
private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}
public void startTornOp() {
start("[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}
public void startTornSeq() {
start("\n" +
"[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}
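// Note (assumed LazyFS semantics, inferred from the configs above): a torn-op
// injection splits a single write into parts and persists only some of them,
// while a torn-seq injection persists only a subset of a sequence of writes;
// both target LMDB's data.mdb to simulate partial writes across a crash.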
public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}
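
A minimal usage sketch of the helper above (hypothetical paths; it assumes the
lazyFsPath system property points at a LazyFS checkout built under build/lazyfs,
as checked in start()):

// Hedged sketch, not from the source: one crash-and-remount cycle with LazyFs.
var lazyFs = new LazyFs("demo", "/tmp/lazyfs-mount", "/tmp/lazyfs-data");
lazyFs.start();        // plain FUSE mount of /tmp/lazyfs-data at /tmp/lazyfs-mount
// ... exercise the system under test against /tmp/lazyfs-mount ...
lazyFs.crash();        // push a crash command into the faults fifo
lazyFs.stop();         // fusermount3 -u /tmp/lazyfs-mount
lazyFs.startTornOp();  // remount with a torn-write injection on data.mdb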

View File

@@ -0,0 +1,490 @@
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsfuse.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
File data1Lazy;
File data2Lazy;
LazyFs lazyFs1;
LazyFs lazyFs2;
ExecutorService executor;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
network = Network.newNetwork();
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);
executor.close();
network.close();
}
private void checkConsistency(String testName) {
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");
return ls1.equals(ls2) && cat1.equals(cat2) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("org.lmdbjava.LmdbNativeException"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
private enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsfuse.integration;
import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;
@@ -29,18 +29,20 @@ public class ResyncIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
Network network = Network.newNetwork();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network);
.withNetwork(network);
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network);
.withNetwork(network);
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
@@ -50,18 +52,22 @@ public class ResyncIT {
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Listening"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -72,36 +78,36 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void manyFiles() throws IOException, InterruptedException, TimeoutException {
var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done");
var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test$i; done");
Assertions.assertEquals(0, ret.getExitCode());
var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
var foundWc = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));
ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done");
ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test-2-$i; done");
Assertions.assertEquals(0, ret.getExitCode());
foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
foundWc = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -112,36 +118,36 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
return 400 == Integer.valueOf(foundWc2.getStdout().strip());
});
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
return 400 == Integer.valueOf(foundWc2.getStdout().strip());
});
}
@Test
void folderAfterMove() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testd1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty1 > /root/dhfs_default/fuse/testd1/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testd1 /root/dhfs_default/fuse/testd2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /root/dhfs_default/fuse/testd2/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testd1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty1 > /dhfs_test/fuse/testd1/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testd1 /dhfs_test/fuse/testd2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /dhfs_test/fuse/testd2/testf2").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -152,20 +158,20 @@ public class ResyncIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/peers-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty1\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testd2/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testd2/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty1\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf2").getStdout()));
}
}

View File

@@ -0,0 +1,11 @@
dhfs.objects.persistence.files.root=${HOME}/dhfs_data/dhfs_root_test
dhfs.objects.root=${HOME}/dhfs_data/dhfs_root_d_test
dhfs.fuse.root=${HOME}/dhfs_data/dhfs_fuse_root_test
dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -1,5 +1,8 @@
package com.usatiuk.kleppmanntree;
/**
* Thrown when attempting to create a tree node under a parent that already has a child with the same name.
*/
public class AlreadyExistsException extends RuntimeException {
public AlreadyExistsException(String message) {
super(message);

View File

@@ -1,32 +0,0 @@
package com.usatiuk.kleppmanntree;
import java.io.Serializable;
public class AtomicClock implements Clock<Long>, Serializable {
private long _max = 0;
public AtomicClock(long counter) {
_max = counter;
}
@Override
public Long getTimestamp() {
return ++_max;
}
public void setTimestamp(Long timestamp) {
_max = timestamp;
}
@Override
public Long peekTimestamp() {
return _max;
}
@Override
public Long updateTimestamp(Long receivedTimestamp) {
var old = _max;
_max = Math.max(_max, receivedTimestamp) + 1;
return old;
}
}

View File

@@ -1,9 +1,26 @@
package com.usatiuk.kleppmanntree;
/**
* A logical clock interface used to order operations between peers
*/
public interface Clock<TimestampT extends Comparable<TimestampT>> {
/**
* Increment and get the current timestamp.
* @return the incremented timestamp
*/
TimestampT getTimestamp();
/**
* Get the current timestamp without incrementing it.
* @return the current timestamp
*/
TimestampT peekTimestamp();
/**
* Update the timestamp with an externally received timestamp.
* Will set the currently stored timestamp to <code>max(receivedTimestamp, currentTimestamp) + 1</code>
* @param receivedTimestamp the received timestamp
* @return the previous timestamp
*/
TimestampT updateTimestamp(TimestampT receivedTimestamp);
}
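
The AtomicClock removed above is essentially the reference implementation of
this contract; a minimal sketch of it makes the max(receivedTimestamp,
currentTimestamp) + 1 merge rule concrete:

// Hedged sketch mirroring the removed AtomicClock.
class SimpleClock implements Clock<Long> {
    private long _max = 0;
    @Override
    public Long getTimestamp() { return ++_max; }   // increment and get
    @Override
    public Long peekTimestamp() { return _max; }    // read without incrementing
    @Override
    public Long updateTimestamp(Long received) {    // merge a remote timestamp
        var old = _max;
        _max = Math.max(_max, received) + 1;
        return old;
    }
}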

View File

@@ -3,6 +3,13 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.Comparator;
/**
* CombinedTimestamp is a record that represents a timestamp and a node ID, ordered first by timestamp and then by node ID.
* @param timestamp the timestamp
* @param nodeId the node ID. If null, then only the timestamp is used for ordering.
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the node ID
*/
public record CombinedTimestamp<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>>
(TimestampT timestamp,
PeerIdT nodeId) implements Comparable<CombinedTimestamp<TimestampT, PeerIdT>>, Serializable {
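
Since comparison goes by timestamp first and node ID second, concurrent
operations from different peers order deterministically; a small sketch
(assuming Long timestamps and String peer IDs):

// Hedged sketch: equal timestamps are ordered by node ID.
new CombinedTimestamp<>(5L, "A").compareTo(new CombinedTimestamp<>(5L, "B")); // < 0
new CombinedTimestamp<>(5L, "B").compareTo(new CombinedTimestamp<>(6L, "A")); // < 0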

View File

@@ -1,7 +1,5 @@
package com.usatiuk.kleppmanntree;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
@@ -10,6 +8,14 @@ import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* An implementation of a tree as described in <a href="https://martin.kleppmann.com/papers/move-op.pdf">A highly-available move operation for replicated trees</a>
*
* @param <TimestampT> Type of the timestamp
* @param <PeerIdT> Type of the peer ID
* @param <MetaT> Type of the node metadata
* @param <NodeIdT> Type of the node ID
*/
public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT> {
private static final Logger LOGGER = Logger.getLogger(KleppmannTree.class.getName());
@@ -17,8 +23,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
private final PeerInterface<PeerIdT> _peers;
private final Clock<TimestampT> _clock;
private final OpRecorder<TimestampT, PeerIdT, MetaT, NodeIdT> _opRecorder;
private HashMap<NodeIdT, TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> _undoCtx = null;
/**
* Constructor with all the dependencies
*
* @param storage Storage interface
* @param peers Peer interface
* @param clock Clock interface
* @param opRecorder Operation recorder interface
*/
public KleppmannTree(StorageInterface<TimestampT, PeerIdT, MetaT, NodeIdT> storage,
PeerInterface<PeerIdT> peers,
Clock<TimestampT> clock,
@@ -29,6 +42,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
_opRecorder = opRecorder;
}
/**
* Traverse the tree from the given node ID using the given list of names
*
* @param fromId The starting node ID
* @param names The list of names to traverse
* @return The resulting node ID or null if not found
*/
private NodeIdT traverseImpl(NodeIdT fromId, List<String> names) {
if (names.isEmpty()) return fromId;
@@ -42,14 +62,21 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return traverseImpl(childId, names.subList(1, names.size()));
}
public NodeIdT traverse(NodeIdT fromId, List<String> names) {
return traverseImpl(fromId, names.subList(1, names.size()));
}
/**
* Traverse the tree from its root node using the given list of names
*
* @param names The list of names to traverse
* @return The resulting node ID or null if not found
*/
public NodeIdT traverse(List<String> names) {
return traverseImpl(_storage.getRootId(), names);
}
/**
* Undo a single log effect
*
* @param effect The log effect to undo
*/
private void undoEffect(LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT> effect) {
if (effect.oldInfo() != null) {
var node = _storage.getById(effect.childId());
@@ -89,10 +116,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
node.withParent(null)
.withLastEffectiveOp(null)
);
_undoCtx.put(node.key(), node);
}
}
/**
* Undo the effects of a log record
*
* @param op The log record to undo
*/
private void undoOp(LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
LOGGER.finer(() -> "Will undo op: " + op);
if (op.effects() != null)
@@ -100,16 +131,32 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
undoEffect(e);
}
/**
* Redo the operation in a log record
*
* @param entry The log record to redo
*/
private void redoOp(Map.Entry<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> entry) {
var newEffects = doOp(entry.getValue().op(), false);
_storage.getLog().replace(entry.getKey(), newEffects);
}
/**
* Perform the operation and put it in the log
*
* @param op The operation to perform
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private void doAndPut(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
var res = doOp(op, failCreatingIfExists);
_storage.getLog().put(res.op().timestamp(), res);
}
/**
* Try to trim the log to the causality threshold
*/
private void tryTrimLog() {
var log = _storage.getLog();
var timeLog = _storage.getPeerTimestampLog();
@@ -165,22 +212,52 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
/**
* Move a node to a new parent with new metadata
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param child The child node ID
* @throws AlreadyExistsException If a node with the same name already exists under the new parent (this overload always fails on name conflicts)
*/
public <LocalMetaT extends MetaT> void move(NodeIdT newParent, LocalMetaT newMeta, NodeIdT child) {
move(newParent, newMeta, child, true);
}
/**
* Move a node to a new parent with new metadata
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param child The child node ID
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
public void move(NodeIdT newParent, MetaT newMeta, NodeIdT child, boolean failCreatingIfExists) {
var createdMove = createMove(newParent, newMeta, child);
applyOp(_peers.getSelfId(), createdMove, failCreatingIfExists);
_opRecorder.recordOp(createdMove);
}
/**
* Apply an external operation from a remote peer
*
* @param from The peer ID
* @param op The operation to apply
*/
public void applyExternalOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op) {
_clock.updateTimestamp(op.timestamp().timestamp());
applyOp(from, op, false);
}
// Returns true if the timestamp is newer than what's seen, false otherwise
/**
* Update the causality threshold timestamp for a peer
*
* @param from The peer ID
* @param newTimestamp The timestamp received from it
* @return True if the timestamp was updated, false otherwise
*/
private boolean updateTimestampImpl(PeerIdT from, TimestampT newTimestamp) {
TimestampT oldRef = _storage.getPeerTimestampLog().getForPeer(from);
if (oldRef != null && oldRef.compareTo(newTimestamp) >= 0) { // FIXME?
@@ -191,6 +268,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return true;
}
/**
* Update the causality threshold timestamp for a peer
*
* @param from The peer ID
* @param timestamp The timestamp received from it
*/
public void updateExternalTimestamp(PeerIdT from, TimestampT timestamp) {
var gotExt = _storage.getPeerTimestampLog().getForPeer(from);
var gotSelf = _storage.getPeerTimestampLog().getForPeer(_peers.getSelfId());
@@ -201,6 +284,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
tryTrimLog();
}
/**
* Apply an operation from a peer
*
* @param from The peer ID
* @param op The operation to apply
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private void applyOp(PeerIdT from, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
if (!updateTimestampImpl(op.timestamp().nodeId(), op.timestamp().timestamp())) return;
@@ -217,45 +309,52 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
assert cmp != 0;
if (cmp < 0) {
try {
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
_undoCtx = new HashMap<>();
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
try {
doAndPut(op, failCreatingIfExists);
} finally {
for (var entry : toUndo) {
redoOp(entry);
}
if (!_undoCtx.isEmpty()) {
for (var e : _undoCtx.entrySet()) {
LOGGER.log(Level.FINE, "Dropping node " + e.getKey());
_storage.removeNode(e.getKey());
}
}
_undoCtx = null;
}
} finally {
tryTrimLog();
if (log.containsKey(op.timestamp())) return;
var toUndo = log.newestSlice(op.timestamp(), false);
for (var entry : toUndo.reversed()) {
undoOp(entry.getValue());
}
doAndPut(op, failCreatingIfExists);
for (var entry : toUndo) {
redoOp(entry);
}
tryTrimLog();
} else {
doAndPut(op, failCreatingIfExists);
tryTrimLog();
}
}
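// Worked example (a sketch of the branch above, not from the source): if the
// log holds ops stamped (5,A) and (7,B) and an op stamped (6,C) arrives late,
// cmp < 0, so newestSlice returns [(7,B)]; (7,B) is undone, (6,C) is applied
// and logged, then (7,B) is redone on top, as if the ops had arrived in order.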
/**
* Get a new combined timestamp, incrementing the underlying clock
*
* @return A new timestamp
*/
private CombinedTimestamp<TimestampT, PeerIdT> getTimestamp() {
return new CombinedTimestamp<>(_clock.getTimestamp(), _peers.getSelfId());
}
/**
* Create a new move operation
*
* @param newParent The new parent node ID
* @param newMeta The new metadata
* @param node The child node ID
* @return A new move operation
*/
private <LocalMetaT extends MetaT> OpMove<TimestampT, PeerIdT, LocalMetaT, NodeIdT> createMove(NodeIdT newParent, LocalMetaT newMeta, NodeIdT node) {
return new OpMove<>(getTimestamp(), newParent, newMeta, node);
}
/**
* Perform the operation and return the log record
*
* @param op The operation to perform
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @return The log record
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> doOp(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
LOGGER.finer(() -> "Doing op: " + op);
LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computed;
@@ -264,8 +363,7 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
} catch (AlreadyExistsException aex) {
throw aex;
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error computing effects for op" + op.toString(), e);
computed = new LogRecord<>(op, null);
throw new RuntimeException("Error computing effects for op " + op.toString(), e);
}
if (computed.effects() != null)
@@ -273,28 +371,24 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return computed;
}
/**
* Get a node for the given key, reusing one from the undo context when available, otherwise creating it in storage
*
* @param key The node ID
* @param parent The parent node ID
* @param meta The metadata
* @return A new tree node
*/
private TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT> getNewNode(NodeIdT key, NodeIdT parent, MetaT meta) {
if (_undoCtx != null) {
var node = _undoCtx.get(key);
if (node != null) {
try {
if (!node.children().isEmpty()) {
LOGGER.log(Level.WARNING, "Not empty children for undone node " + key);
}
node = node.withParent(parent).withMeta(meta);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Error while fixing up node " + key, e);
node = null;
}
}
if (node != null) {
_undoCtx.remove(key);
return node;
}
}
return _storage.createNewNode(key, parent, meta);
}
/**
* Apply the effects of a log record
*
* @param sourceOp The source operation
* @param effects The list of log effects
*/
private void applyEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> sourceOp, List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) {
for (var effect : effects) {
LOGGER.finer(() -> "Applying effect: " + effect + " from op " + sourceOp);
@@ -335,6 +429,15 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
/**
* Compute the effects of a move operation
*
* @param op The operation to process
* @param failCreatingIfExists Whether to fail if there is a name conflict,
* otherwise replace the existing node
* @return The log record with the computed effects
* @throws AlreadyExistsException If the node already exists and failCreatingIfExists is true
*/
private LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> computeEffects(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op, boolean failCreatingIfExists) {
var node = _storage.getById(op.childId());
@@ -372,10 +475,6 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
var conflictNode = _storage.getById(conflictNodeId);
MetaT conflictNodeMeta = conflictNode.meta();
- if (Objects.equals(conflictNodeMeta, op.newMeta())) {
- return new LogRecord<>(op, null);
- }
LOGGER.finer(() -> "Node creation conflict: " + conflictNode);
String newConflictNodeName = op.newName() + ".conflict." + conflictNode.key();
@@ -400,18 +499,14 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
if (oldMeta != null
&& op.newMeta() != null
&& !oldMeta.getClass().equals(op.newMeta().getClass())) {
LOGGER.log(Level.SEVERE, "Class mismatch for meta for node " + node.key());
return new LogRecord<>(op, null);
throw new RuntimeException("Class mismatch for meta for node " + node.key());
}
var replaceNodeId = newParent.children().get(op.newName());
if (replaceNodeId != null) {
var replaceNode = _storage.getById(replaceNodeId);
var replaceNodeMeta = replaceNode.meta();
- if (Objects.equals(replaceNodeMeta, op.newMeta())) {
- return new LogRecord<>(op, null);
- }
LOGGER.finer(() -> "Node replacement: " + replaceNode);
return new LogRecord<>(op, List.of(
@@ -426,6 +521,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
));
}
/**
* Check if a node is an ancestor of another node
*
* @param child The node to look for in the ancestor chain
* @param parent The node whose ancestor chain is walked
* @return true if the child is an ancestor of the parent, false otherwise
*/
private boolean isAncestor(NodeIdT child, NodeIdT parent) {
var node = _storage.getById(parent);
NodeIdT curParent;
@@ -436,6 +538,11 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return false;
}
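The walk itself is elided by the hunk above; a hypothetical sketch of what it typically looks like, assuming TreeNode exposes a parent() accessor (an assumption, since that accessor is not shown in this diff):

// Sketch of the elided walk: follow parent links upward from `parent`;
// if `child` is found on the way to the root, it is an ancestor of `parent`,
// so moving `parent` under `child` (or vice versa) could create a cycle.
private boolean isAncestorSketch(NodeIdT child, NodeIdT parent) {
    var node = _storage.getById(parent);
    NodeIdT curParent;
    // Walk up the parent chain until the root (whose parent is null).
    while ((curParent = node.parent()) != null) {
        if (curParent.equals(child)) return true;
        node = _storage.getById(curParent);
    }
    return false;
}
// end of sketch

Presumably computeEffects uses this check to reject moves that would make a node its own ancestor.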
/**
* Walk the tree and apply the given consumer to each node
*
* @param consumer The consumer to apply to each node
*/
public void walkTree(Consumer<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>> consumer) {
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
queue.push(_storage.getRootId());
@@ -449,6 +556,12 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
}
}
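A brief hypothetical usage sketch (assumes a constructed tree instance named tree; the AtomicLong is just a mutable counter usable from the lambda):

// Count all nodes visited by the tree walk above.
var count = new java.util.concurrent.atomic.AtomicLong();
tree.walkTree(node -> count.incrementAndGet());
System.out.println("nodes: " + count.get());
// end of sketch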
/**
* Find the parent of a node that matches the given predicate
*
* @param kidPredicate The predicate to match the child node
* @return A pair containing the name of the child and the ID of the parent, or null if not found
*/
public Pair<String, NodeIdT> findParent(Function<TreeNode<TimestampT, PeerIdT, MetaT, NodeIdT>, Boolean> kidPredicate) {
ArrayDeque<NodeIdT> queue = new ArrayDeque<>();
queue.push(_storage.getRootId());
@@ -469,6 +582,13 @@ public class KleppmannTree<TimestampT extends Comparable<TimestampT>, PeerIdT ex
return null;
}
/**
* Record the bootstrap operations for a given peer.
* Visits all nodes of the tree and adds their effective operations both to the queue of operations
* to be sent to the peer and to the global operation log.
*
* @param host The peer ID
*/
public void recordBoostrapFor(PeerIdT host) {
TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, OpMove<TimestampT, PeerIdT, MetaT, NodeIdT>> result = new TreeMap<>();

LogEffect.java

@@ -2,6 +2,18 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* LogEffect is a record that represents the effect of a log entry on a tree node.
* @param oldInfo the old information about the node, before it was moved. Null if the node did not exist before
* @param effectiveOp the operation that caused this effect to be applied
* @param newParentId the ID of the new parent node
* @param newMeta the new metadata of the node
* @param childId the ID of the child node
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>(
LogEffectOld<TimestampT, PeerIdT, MetaT, NodeIdT> oldInfo,
OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> effectiveOp,
@@ -10,14 +22,14 @@ public record LogEffect<TimestampT extends Comparable<TimestampT>, PeerIdT exten
NodeIdT childId) implements Serializable {
public String oldName() {
if (oldInfo.oldMeta() != null) {
- return oldInfo.oldMeta().getName();
+ return oldInfo.oldMeta().name();
}
return childId.toString();
}
public String newName() {
if (newMeta != null) {
- return newMeta.getName();
+ return newMeta.name();
}
return childId.toString();
}

LogEffectOld.java

@@ -2,6 +2,16 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Represents the old information about a node before it was moved.
* @param oldEffectiveMove the old effective move that caused this effect to be applied
* @param oldParent the ID of the old parent node
* @param oldMeta the old metadata of the node
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogEffectOld<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> oldEffectiveMove,
NodeIdT oldParent,

LogInterface.java

@@ -4,29 +4,82 @@ import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
/**
* LogInterface is an interface that allows accessing the log of operations
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public interface LogInterface<
TimestampT extends Comparable<TimestampT>,
PeerIdT extends Comparable<PeerIdT>,
MetaT extends NodeMeta,
NodeIdT> {
/**
* Peek at the oldest log entry without removing it.
* @return the oldest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekOldest();
/**
* Remove and return the oldest log entry.
* @return the oldest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> takeOldest();
/**
* Peek at the newest log entry without removing it.
* @return the newest log entry
*/
Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekNewest();
/**
* Return all log entries that are newer than the given timestamp.
* @param since the timestamp to compare with
* @param inclusive if true, include the log entry with the given timestamp
* @return a list of log entries that are newer than the given timestamp
*/
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>>
newestSlice(CombinedTimestamp<TimestampT, PeerIdT> since, boolean inclusive);
/**
* Return all the log entries
* @return a list of all log entries
*/
List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>> getAll();
/**
* Checks if the log is empty.
* @return true if the log is empty, false otherwise
*/
boolean isEmpty();
/**
* Checks if the log contains the given timestamp.
* @param timestamp the timestamp to check
* @return true if the log contains the given timestamp, false otherwise
*/
boolean containsKey(CombinedTimestamp<TimestampT, PeerIdT> timestamp);
/**
* Get the size of the log.
* @return the size of the log (number of entries)
*/
long size();
/**
* Add a log entry to the log.
* @param timestamp the timestamp of the log entry
* @param record the log entry
* @throws IllegalStateException if the log entry already exists
*/
void put(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
/**
* Replace a log entry in the log.
* @param timestamp the timestamp of the log entry
* @param record the log entry
*/
void replace(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record);
}
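To make the contract concrete, here is a minimal in-memory sketch of LogInterface backed by a TreeMap. It is an illustration only, not the project's actual implementation, and it assumes CombinedTimestamp is Comparable (consistent with its use as a sorted log key throughout):

import org.apache.commons.lang3.tuple.Pair;

import java.util.List;
import java.util.TreeMap;

// Minimal in-memory sketch: a TreeMap keeps entries sorted by timestamp,
// which maps directly onto the oldest/newest/slice operations above.
public class InMemoryLog<
        TimestampT extends Comparable<TimestampT>,
        PeerIdT extends Comparable<PeerIdT>,
        MetaT extends NodeMeta,
        NodeIdT> implements LogInterface<TimestampT, PeerIdT, MetaT, NodeIdT> {
    private final TreeMap<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> _log
            = new TreeMap<>();

    @Override
    public Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekOldest() {
        var e = _log.firstEntry();
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> takeOldest() {
        var e = _log.pollFirstEntry(); // removes the entry, unlike peekOldest
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>> peekNewest() {
        var e = _log.lastEntry();
        return e == null ? null : Pair.of(e.getKey(), e.getValue());
    }

    @Override
    public List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>>
    newestSlice(CombinedTimestamp<TimestampT, PeerIdT> since, boolean inclusive) {
        // Ascending order; applyOp reverses the slice itself when undoing.
        return _log.tailMap(since, inclusive).entrySet().stream()
                .map(e -> Pair.of(e.getKey(), e.getValue())).toList();
    }

    @Override
    public List<Pair<CombinedTimestamp<TimestampT, PeerIdT>, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT>>> getAll() {
        return _log.entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue())).toList();
    }

    @Override
    public boolean isEmpty() {
        return _log.isEmpty();
    }

    @Override
    public boolean containsKey(CombinedTimestamp<TimestampT, PeerIdT> timestamp) {
        return _log.containsKey(timestamp);
    }

    @Override
    public long size() {
        return _log.size();
    }

    @Override
    public void put(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record) {
        if (_log.containsKey(timestamp))
            throw new IllegalStateException("Log entry already exists: " + timestamp);
        _log.put(timestamp, record);
    }

    @Override
    public void replace(CombinedTimestamp<TimestampT, PeerIdT> timestamp, LogRecord<TimestampT, PeerIdT, MetaT, NodeIdT> record) {
        _log.put(timestamp, record);
    }
}
// end of sketch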

LogRecord.java

@@ -3,6 +3,15 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
import java.util.List;
/**
* Represents a log record in the Kleppmann tree.
* @param op the operation that is stored in this log record
* @param effects the effects of the operation (resulting moves)
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record LogRecord<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(OpMove<TimestampT, PeerIdT, MetaT, NodeIdT> op,
List<LogEffect<TimestampT, PeerIdT, MetaT, NodeIdT>> effects) implements Serializable {

NodeMeta.java

@@ -2,8 +2,24 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Represents metadata associated with a node in the Kleppmann tree.
* This interface is used to define the metadata that can be associated with nodes in the tree.
* Implementations of this interface should provide a name for the node and a method to create a copy of it with a new name.
*/
public interface NodeMeta extends Serializable {
- String getName();
/**
* Returns the name of the node.
*
* @return the name of the node
*/
String name();
/**
* Creates a copy of the metadata with a new name.
*
* @param name the new name for the metadata
* @return a new instance of NodeMeta with the specified name
*/
NodeMeta withName(String name);
}
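Since name() and withName(String) mirror a record component and a copy constructor, a hypothetical implementation can be a near one-liner record (this example type is not part of the project):

// Hypothetical example meta: the record component provides name() for free,
// and withName builds a modified copy.
public record ExampleFileMeta(String name) implements NodeMeta {
    @Override
    public NodeMeta withName(String name) {
        return new ExampleFileMeta(name);
    }
}
// end of sketch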

OpMove.java

@@ -2,12 +2,30 @@ package com.usatiuk.kleppmanntree;
import java.io.Serializable;
/**
* Operation that moves a child node to a new parent node.
*
* @param timestamp the timestamp of the operation
* @param newParentId the ID of the new parent node
* @param newMeta the new metadata of the node, can be null
* @param childId the ID of the child node (the node that is being moved)
* @param <TimestampT> the type of the timestamp
* @param <PeerIdT> the type of the peer ID
* @param <MetaT> the type of the node metadata
* @param <NodeIdT> the type of the node ID
*/
public record OpMove<TimestampT extends Comparable<TimestampT>, PeerIdT extends Comparable<PeerIdT>, MetaT extends NodeMeta, NodeIdT>
(CombinedTimestamp<TimestampT, PeerIdT> timestamp, NodeIdT newParentId, MetaT newMeta,
NodeIdT childId) implements Serializable {
/**
* Returns the new name of the node: the name from the new metadata if present,
* otherwise the child ID converted to a string.
*
* @return the new name of the node
*/
public String newName() {
if (newMeta != null)
- return newMeta.getName();
+ return newMeta.name();
return childId.toString();
}
}
