236 Commits

Author SHA1 Message Date
87c404828c add powershell run scripts 2025-05-04 17:44:57 +02:00
b074e8eb44 Dhfs-fs: proper not found unlink exception 2025-05-04 17:13:43 +02:00
eb5b0ae03c cleanup run wrapper 2025-05-03 17:18:28 +02:00
c329c1f982 Objects: nested transactions 2025-05-03 13:57:44 +02:00
4e7b13227b Sync-base: "kick out" inactive peers 2025-05-03 13:14:06 +02:00
db51d7280c Revert "Sync-base: get rid of JDataRemotePush"
This reverts commit 07133a71
2025-05-03 11:25:23 +02:00
70fecb389b Objects: cleanup transaction put a little 2025-05-02 12:50:43 +02:00
6e9a2b25f6 Utils: cleanup UnsafeAccessor a little 2025-05-02 12:39:50 +02:00
b84ef95703 Revert "Objects: simplify tx commit hooks"
This reverts commit c0735801b9.
2025-05-01 13:42:06 +02:00
c0735801b9 Objects: simplify tx commit hooks 2025-05-01 13:27:39 +02:00
b506ced9d5 Objects: simplify WritebackObjectPersistentStore 2025-05-01 10:29:11 +02:00
46bc9fa810 Objects: remove transactionobject 2025-05-01 09:14:50 +02:00
8ab034402d Revert "Objects: simplify cache"
This reverts commit d94d11ec8b.
2025-04-29 20:36:19 +02:00
d94d11ec8b Objects: simplify cache 2025-04-29 20:22:21 +02:00
5beaad2d32 Objects: better iterators 2025-04-29 16:53:26 +02:00
c4484d21e5 Objects: simplify tx commit a little 2025-04-29 16:50:41 +02:00
2766ef1bae Add --enable-preview to run.xml and run 2025-04-29 12:49:03 +02:00
58de85c078 Sync-base: WritebackObjectPersistentStore cleanup 2025-04-29 12:46:39 +02:00
cc9da86440 Sync-base: JObjectKeyImpl import fix 2025-04-29 12:44:41 +02:00
e6c9e6aee9 Dhfs-fuse: implement write_buf for one less copy 2025-04-29 12:44:18 +02:00
62265355c4 Dhfs-fs: a little cleanup in DhfsFileServiceImpl 2025-04-29 12:44:00 +02:00
854bce1627 Utils: UninitializedByteBuffer 2025-04-29 12:43:18 +02:00
1b19c77bb6 Utils: slightly faster add in HashSetDelayedBlockingQueue 2025-04-29 12:40:45 +02:00
7aa968a569 Dhfs-fuse: fix import 2025-04-29 00:45:34 +02:00
e348c39be1 Utils: add UnsafeAccessor to JnrPtrByteOutput
oops
2025-04-28 23:50:31 +02:00
1b54830651 Objects: don't lock some objects twice for no reason 2025-04-28 23:49:45 +02:00
bc5f0b816c Objects: add putNew
to avoid searching for nonexistent objects
2025-04-28 23:47:53 +02:00
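A note on the putNew idea: in a transactional store, a plain put(key, value) typically has to probe the backing store first to capture the previous value for conflict checks, while putNew can skip that probe when the caller guarantees the object is brand new. A minimal sketch of the difference, with hypothetical names (the actual Transaction API is not part of this excerpt):

    import java.util.HashMap;
    import java.util.Map;

    class WriteSet<K, V> {
        private final Map<K, V> store;                  // stands in for the persistent store
        private final Map<K, V> writes = new HashMap<>();

        WriteSet(Map<K, V> store) { this.store = store; }

        // Plain put: looks the key up (possibly hitting disk) to record the old value.
        void put(K key, V value) {
            V old = writes.containsKey(key) ? writes.get(key) : store.get(key);
            recordOldValue(key, old);
            writes.put(key, value);
        }

        // putNew: caller promises the key does not exist yet, so no lookup is needed.
        void putNew(K key, V value) {
            recordOldValue(key, null);
            writes.put(key, value);
        }

        private void recordOldValue(K key, V old) { /* conflict-check bookkeeping elided */ }
    }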
9ff914bdaa Utils: move UnsafeAccessor to utils 2025-04-28 23:36:42 +02:00
1cee6f62b8 Utils: less dumb DataLocker 2025-04-28 23:34:30 +02:00
81703a9406 Sync-base: some microoptimizations 2025-04-28 15:44:36 +02:00
1757034e0b Sync-base: speed up RemoteObjPusherTxHook
they are immutable, no need to do real equals, they can't be same if different
2025-04-28 15:09:23 +02:00
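The rationale here is the identity-instead-of-equals shortcut: when values are immutable and are only ever replaced (never mutated) on change, two different references really do mean a change, so a reference comparison can stand in for a deep equals on the hot path. A hedged illustration, not the actual hook code:

    // Hypothetical immutable value; the real types are not shown in this excerpt.
    record RemoteObjMeta(String key, long version) {
        // For immutable, replace-on-change values, identity is a sound and much
        // cheaper substitute for a field-by-field equals().
        static boolean maybeChanged(RemoteObjMeta before, RemoteObjMeta after) {
            return before != after;
        }
    }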
d9765a51d8 Sync-base: freeze JKleppmannTreeNodeHolder root nodes 2025-04-28 13:00:50 +02:00
99ef560b95 Sync-base: static final hooks
so that compiler can fold them
2025-04-28 12:59:21 +02:00
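"So that compiler can fold them" is a JIT argument: anything reachable from a static final field is a constant to the JIT, which can then devirtualize and inline the hook calls. A generic sketch of the pattern (the hook interface is hypothetical):

    import java.util.List;

    interface CommitHook {
        void onCommit();
    }

    final class CommitHooks {
        // static final makes the list a constant for the JIT, so the dispatch
        // in runAll() can be unrolled and inlined.
        private static final List<CommitHook> HOOKS = List.of(
                () -> { /* hook 1 */ },
                () -> { /* hook 2 */ });

        static void runAll() {
            for (CommitHook h : HOOKS) h.onCommit();
        }
    }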
f87eb365c3 Sync-base: remove our referrers from canDelete 2025-04-26 16:07:23 +02:00
8d3244fe64 Webui: use node-forge for hashing
apparently crypto works only on ssl websites
2025-04-26 14:09:05 +02:00
0a8985c93f Short readme 2025-04-26 11:19:06 +02:00
a8cf483eee Simplify ObjectPersistentStore 2025-04-26 11:11:51 +02:00
f7338f4e80 Dhfs-app: check that kill tests ls/cat return success 2025-04-26 10:44:33 +02:00
b89b182c58 Dhfs-fuse: make lmdb map size configurable 2025-04-25 23:11:02 +02:00
ad4ce72fdd Dhfs-fuse: attempt at windows support 2025-04-25 22:17:55 +02:00
26ba65fdce Sync-base: make Pushing invalidations log message trace
it's too big
2025-04-25 22:02:21 +02:00
697add66d5 Kelppmanntree: fix a dumb bug
directories are always the same duh
2025-04-25 22:00:33 +02:00
a53fc5e973 Kelppmanntree: remove undocontext 2025-04-25 21:37:50 +02:00
b034591091 Sync-base: OpHandler interface 2025-04-25 15:04:07 +02:00
07133a7186 Sync-base: get rid of JDataRemotePush 2025-04-25 14:57:06 +02:00
8cbecf1714 Dhfs-fs: remove optional from read 2025-04-25 13:40:48 +02:00
16ba692019 Recordify tree metadata 2025-04-25 13:35:54 +02:00
e5be1e6164 Cleanup poms 2025-04-25 13:13:21 +02:00
c74fdfc5a6 Dhfs-app: test fixes 2 2025-04-25 12:59:25 +02:00
c4268ab35b Dhfs-app: test fixes 2025-04-25 11:23:12 +02:00
2ab6e3c3f7 Sync-base: Handle getting peer info failure nicely 2025-04-25 11:15:36 +02:00
ec8546bd69 Show Peer address in WebUI 2025-04-25 11:13:30 +02:00
6ecef94b90 Webui: a little nicer 2025-04-25 11:07:08 +02:00
e7f22d783f Webui: proper async hash 2025-04-25 11:03:39 +02:00
bed55162d7 Peer certificate check when adding 2025-04-25 10:48:55 +02:00
f43c6db4f0 Run code format 2025-04-25 09:58:46 +02:00
56a15f4672 Sync-base: cleanup JKleppmannTree meta 2025-04-25 09:57:44 +02:00
85a1fa09ab KleppmannTree: a little cleanup 2025-04-25 09:45:35 +02:00
cca0b410cf Some packages cleanup 2025-04-25 09:16:31 +02:00
d94abfee97 Sync-base: op extractor interface 2025-04-25 09:16:31 +02:00
dependabot[bot] 6bd92ad7cd Bump the npm_and_yarn group across 1 directory with 2 updates
Bumps the npm_and_yarn group with 2 updates in the /webui directory: [react-router](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router) and [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom).


Updates `react-router` from 7.4.1 to 7.5.2
- [Release notes](https://github.com/remix-run/react-router/releases)
- [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router/CHANGELOG.md)
- [Commits](https://github.com/remix-run/react-router/commits/react-router@7.5.2/packages/react-router)

Updates `react-router-dom` from 7.4.1 to 7.5.2
- [Release notes](https://github.com/remix-run/react-router/releases)
- [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md)
- [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@7.5.2/packages/react-router-dom)

---
updated-dependencies:
- dependency-name: react-router
  dependency-version: 7.5.2
  dependency-type: direct:production
  dependency-group: npm_and_yarn
- dependency-name: react-router-dom
  dependency-version: 7.5.2
  dependency-type: direct:production
  dependency-group: npm_and_yarn
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-25 09:14:39 +02:00
1965d93f25 Dhfs-app: handle empty op push correctly 2025-04-24 22:38:01 +02:00
f6685f45f9 Dhfs-app: increase sync timeout 2025-04-24 22:34:04 +02:00
060ab1767d Dhfs-app: don't crash too late (never) 2025-04-24 19:46:41 +02:00
89d87095c8 Dhfs-app: class level parallel docker tests
otherwise the logs are unreadable
2025-04-24 17:42:24 +02:00
7425c1f312 Dhfs-app: add failed to connect logs 2025-04-24 17:40:38 +02:00
428eca325f Dhfs-app: increase lazyfs start timeout 2025-04-24 17:19:41 +02:00
005bc35496 Dhfs-app: assume lazyfs doesn't crash too early 2025-04-24 16:32:25 +02:00
6685575ca5 Dhfs-app: please work now 2025-04-24 15:48:57 +02:00
1ae813aacd Sync-base: cleanup old grpc channels 2025-04-24 15:46:23 +02:00
e81671251a Dhfs-fs: nevermind, no sleep 2025-04-24 15:30:18 +02:00
add26bb156 Dhfs-fs: larger sleep 2025-04-24 15:23:31 +02:00
4060045f15 Dhfs-fs: no need for removing/adding chunk logs now 2025-04-24 15:14:18 +02:00
75b484d5b2 Dhfs-app: fix asyncFence if there were no transactions 2025-04-24 15:14:03 +02:00
1d9dc8ed4d Dhfs-app: lazyfs test fixes 2025-04-24 15:13:45 +02:00
7a85704862 Dhfs-app: stopatlevel1 for docker tests 2025-04-24 14:14:52 +02:00
367eedd540 Dhfs-app: less verbose docker tests 2025-04-24 12:53:26 +02:00
d01b9204f7 Dhfs-app: better checkConsistency 2025-04-24 12:36:11 +02:00
67fdacc3ff Dhfs-app: remove docker networks in test 2025-04-24 12:28:56 +02:00
6ed9051be1 Dhfs-app: better kill tests 2025-04-24 12:13:56 +02:00
abf95ba847 Dhfs-app: kill test second container 2025-04-24 11:23:32 +02:00
6a9f64439f Executor fix 2025-04-24 10:37:59 +02:00
ceb9342b45 Dhfs-app: try to crash lazyfs a bit later 2025-04-24 09:22:22 +02:00
ca354ba09c Webui: don't show complete address 2025-04-23 17:24:48 +02:00
81af021292 Wait for lazyfs to crash before unmount 2025-04-23 17:19:44 +02:00
0c04079258 Improved peer UI 2025-04-23 16:37:45 +02:00
2e2eb3ac97 Dhfs-app: lazyfs torn op testing 2025-04-23 15:07:09 +02:00
e2e756e7c5 Objects: getFromSource just ever so slightly faster
one map access
2025-04-23 14:50:34 +02:00
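"One map access" is most often the containsKey-then-get pair collapsed into a single get, which halves the hash lookups. A sketch of the pattern, assuming null is never a valid stored value (the actual getFromSource body is not in this excerpt):

    import java.util.Map;

    final class SourceLookup {
        // Two hash lookups per call.
        static <K, V> V getTwoLookups(Map<K, V> source, K key, V fallback) {
            if (source.containsKey(key)) return source.get(key);
            return fallback;
        }

        // One hash lookup; valid only when null never means "present".
        static <K, V> V getOneLookup(Map<K, V> source, K key, V fallback) {
            V v = source.get(key);
            return v != null ? v : fallback;
        }
    }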
04e932ed62 Dhfs-app: LazyFs directory test non-repeated
some docker ip pool problems
2025-04-23 14:17:42 +02:00
aeec66389d Dhfs-app: LazyFs directory test 2025-04-23 14:00:44 +02:00
adc7356d4a Sync-base: fix leaking non-flushed ops 2025-04-23 14:00:30 +02:00
16da05292f Objects: better onflush for no write transactions 2025-04-23 13:58:55 +02:00
b0149b7251 Objects: less logs in iterators
less crap, and there are tests now
2025-04-23 11:06:26 +02:00
24416c1e87 Dhfs-app: less badlazyfs crash test 2025-04-23 10:38:52 +02:00
34db870fc6 Objects: simplify TombstoneMergingKvIterator 2025-04-22 23:53:41 +02:00
0e62a29ce0 Objects: cache peeked key in LmdbKvIterator 2025-04-22 23:53:18 +02:00
7de5f91fd2 Dhfs-app: lazyfs crash test 2025-04-22 23:25:26 +02:00
ac68208b1a Sync-base: don't crash if invalidation queue is corrupted 2025-04-22 21:38:15 +02:00
4e0675940e Dhfs-app: better fileConflictTest 2025-04-22 21:37:51 +02:00
4f5f347b3c Use stable jnr-fuse version 2025-04-21 11:30:14 +02:00
bd5395e03f Dhfs-fs: mtime fix 2025-04-21 11:29:35 +02:00
f56f564e8b Objects: simplify TransactionManager 2025-04-21 11:15:48 +02:00
eaa413e200 Objects: interfacify MaybeTombstone Data 2025-04-19 17:25:06 +02:00
f3e4d99fcb Objects: seal JDataVersionedWrapper 2025-04-19 12:07:36 +02:00
1c71b26ed8 Objects: 1 less field in JDataVersionedWrapperLazy 2025-04-19 12:06:33 +02:00
e6f95ef028 Remove supportlib
nice idea, but ram usage explosion seems to cancel out the benefits
2025-04-19 11:32:35 +02:00
59e8f6a6b4 Objects: one less copy when serializing
only cache what was really read, otherwise its lifetime is the same as transaction
2025-04-19 11:03:26 +02:00
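"Only cache what was really read" suggests a lazily-deserialized wrapper: keep the raw bytes, decode on first access, and cache the result only then, so objects a transaction never touches are never copied out. A minimal sketch with hypothetical names (JDataVersionedWrapperLazy itself is not shown in this diff):

    import java.util.function.Supplier;

    final class LazyWrapper<T> {
        private Supplier<T> deserializer; // dropped after first use
        private T cached;

        LazyWrapper(Supplier<T> deserializer) { this.deserializer = deserializer; }

        synchronized T get() {
            if (deserializer != null) {
                cached = deserializer.get(); // decode once, and only if actually read
                deserializer = null;
            }
            return cached;
        }
    }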
0292df7f0e Objects: faster JObjectKey 2025-04-19 11:02:30 +02:00
a6a4101bb0 Objects: use bytebuffer to read
a little less GC pressure
2025-04-18 13:21:04 +02:00
59fa5dcf28 Fixie for HashSetDelayedBlockingQueueTest 2025-04-18 13:08:55 +02:00
0f5fb8b8b6 Objects: PBT MergingIterator test 2025-04-18 13:08:40 +02:00
c087dd8971 More microoptimizations 3! 2025-04-18 12:13:22 +02:00
14ddddd0ff Sync-base: use serialized certificate in self data
makes it easier to switch serialization
2025-04-18 11:06:40 +02:00
9859378627 Sync-base: move "_data" to suffix
makes cache much less bad
2025-04-18 11:06:15 +02:00
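One plausible reading of "makes cache much less bad": this is a key-layout change, and in a sorted store like LMDB a suffix keeps an object's keys adjacent, while a prefix clusters all "_data" keys together, far from their objects. A toy illustration of the two layouts (key scheme hypothetical):

    final class KeyLayout {
        // Prefix layout: "_data" keys sort together, away from their objects.
        static String prefixKey(String objId) { return "_data/" + objId; }

        // Suffix layout: an object's keys stay adjacent in sort order, so a scan
        // touching one object stays within nearby pages.
        static String suffixKey(String objId) { return objId + "/_data"; }
    }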
e167c21d40 More microoptimizations 2! 2025-04-17 11:48:43 +02:00
7dc8f486ea More microoptimizations! 2025-04-17 10:02:26 +02:00
da1a996e6f Support: un-simplify allocateUninitialized 2025-04-17 09:20:56 +02:00
bb52a3af0e Objects: waste less cpu in transaction commit 2025-04-17 00:26:58 +02:00
de0b868349 Objects: one less sorted tree traversal in advanceIterator
totally overengineering
2025-04-17 00:14:56 +02:00
d4d4e150c1 Objects: use LATIN1 strings for keys
should be a bit faster to match the internal string representation
2025-04-17 00:12:37 +02:00
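Background for the LATIN1 change: since JDK 9's compact strings, a String whose characters all fit in LATIN-1 is stored as one byte per char internally, so ISO-8859-1 encoding and decoding are close to a straight copy. A sketch of key conversion under that assumption:

    import java.nio.charset.StandardCharsets;

    final class KeyCodec {
        // Near-memcpy for LATIN1 strings: matches the internal compact representation.
        static byte[] toBytes(String key) {
            return key.getBytes(StandardCharsets.ISO_8859_1);
        }

        static String fromBytes(byte[] raw) {
            return new String(raw, StandardCharsets.ISO_8859_1);
        }
    }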
c9b0400d50 Objects: faster MergingKvIterator 2025-04-16 23:41:30 +02:00
94218330b1 Simplify allocateUninitialized 2025-04-16 16:26:58 +02:00
dbe2a72f7c Objects: don't create a db_ver_obj bytebuffer every time 2025-04-16 15:39:52 +02:00
643c53c894 Objects: less string concats 2025-04-15 17:07:03 +02:00
29fdd3eb08 Objects: don't calculate bundle size all the time from scratch 2025-04-15 17:02:26 +02:00
e6ead10e7f Objects: use direct bytebuffers when reading 2025-04-15 16:35:31 +02:00
04c5685fd5 Use @Singleton instead of @ApplicationScoped in hot paths
Definitely a microoptimization but noticeable on the flamegraph
2025-04-15 16:29:49 +02:00
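Background for the @Singleton swap: in CDI, @ApplicationScoped beans sit behind a client proxy, so every call pays an extra indirection, while @Singleton injects the bean instance directly. A minimal Quarkus-style illustration:

    import jakarta.enterprise.context.ApplicationScoped;
    import jakarta.inject.Singleton;

    @ApplicationScoped // calls go through a client proxy: one extra hop per call
    class ProxiedService {
        long hot() { return System.nanoTime(); }
    }

    @Singleton // the injected reference is the bean itself: no proxy on the hot path
    class DirectService {
        long hot() { return System.nanoTime(); }
    }

The trade-off is losing the proxy's lazy initialization, which rarely matters for hot-path internals.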
7061117f56 Fs: file writing with less cpu wasted 2025-04-15 16:14:40 +02:00
67852fb37e Objects: less awful put, with 2 less copies 2025-04-15 11:17:23 +02:00
d48cc18e85 Server: only try to purge directory
won't bother getting it working with rootful docker
2025-04-13 22:03:31 +02:00
77177414eb Server: slight directory fixes 2025-04-13 20:37:12 +02:00
83e0f6eb0a Server: fix not being able to delete temp dir in tests 2025-04-13 20:05:51 +02:00
a5727c01b1 Server: push resync after crash 2025-04-13 19:57:16 +02:00
711c4f5e28 Server: simple kill test 2025-04-13 19:36:34 +02:00
45556f2b74 Objects: rename name to value in JObjectKey 2025-04-13 17:10:33 +02:00
146870c281 Objects: support parallel test running
crashes with env maxreaders reached though
2025-04-13 14:54:25 +02:00
9178e7ee2d Separate dhfs-fs/fuse/sync-base 2025-04-13 14:30:00 +02:00
7c605135c5 Some native image support
but it still doesn't work because of JNR
2025-04-13 13:09:36 +02:00
491afd454b Objects: serialize written objects in parallel 2025-04-13 12:08:40 +02:00
bb65aab166 Server: remember opened files ids 2025-04-13 11:52:50 +02:00
a4810c7ee4 Objects: fix cache reading twice 2025-04-12 20:49:29 +02:00
e42e076b77 Objects: disable dhfs.objects.lru.print-stats by default
quite annoying
2025-04-09 11:02:46 +02:00
513cbd717d Server: remove "dhfs.objects" from proto files 2025-04-05 20:17:29 +02:00
075867daaa Server: less bad chunk merging 2025-04-05 17:50:43 +02:00
8e4ea67e53 Server: a little jmap cleanup 2025-04-04 17:26:12 +02:00
fb128882cb Server: use StringUtils.leftPad for JMapLongKey toString
much faster, without regex parsing every time!
2025-04-03 23:00:36 +02:00
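For context: String.format re-parses its format string on every call (presumably the "regex parsing" the message refers to), while StringUtils.leftPad just allocates and copies. A sketch of the padded toString, with the pad width as an assumption:

    import org.apache.commons.lang3.StringUtils;

    final class LongKeyFormat {
        // Zero-padding makes lexicographic order match numeric order for the keys.
        static String slowToString(long v) {
            return String.format("%020d", v);                       // parses "%020d" each call
        }

        static String fastToString(long v) {
            return StringUtils.leftPad(Long.toString(v), 20, '0');  // plain char copy
        }
    }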
cb8c50000a Objects: simplify merging iterator even more^2
initialMaxValue streams can be simplified too
2025-04-03 22:31:59 +02:00
4c5cbfb5bf Objects: simplify merging iterator even more
no need for the hashmap step of iterator construction
2025-04-03 22:23:23 +02:00
6bcec4a260 Objects: simplify merging iterator
remove the first match "optimization", as it doesn't really
matter with the separate read object methods
2025-04-03 22:13:34 +02:00
df00584367 Objects: getting begin/end iterators 2025-04-03 15:05:09 +02:00
ea4f041d6e Objects: remove outdated snapshot stuff 2025-04-03 13:02:55 +02:00
3c37638db2 Objects: less locks in writeback 2025-04-02 16:14:50 +02:00
0e12a59f23 Objects: a little lazier caching 2025-04-02 15:35:02 +02:00
735dd605d7 Objects: get rid of locks in lmdb store 2025-04-02 15:13:58 +02:00
194166109e Server: don't forget to resend invalidations in case of error 2025-04-01 13:10:11 +02:00
68111a0c4f Server: use formatter for logs in grpc server 2025-04-01 12:53:21 +02:00
b872c32a05 Server: more threads by default 2025-04-01 12:44:56 +02:00
0e14b1cd39 Server: more canDelete logs 2025-04-01 12:19:16 +02:00
17843952f2 Server: push multiple ops, really^2 2025-04-01 12:14:30 +02:00
ffef8959df Server: push multiple ops, really 2025-03-31 16:53:52 +02:00
cb909478dc Server: parallel op sending try lock 2025-03-31 16:35:13 +02:00
06335b4b99 Server: parallel op sending 2025-03-31 16:32:13 +02:00
8351bec59a Revert "Server: parallel op sending"
This reverts commit 0f8002dc2c.
2025-03-31 16:20:58 +02:00
29663f575d Server: try downloading from all if we have a dummy object
just speeds up sync a little
2025-03-31 16:06:17 +02:00
0f8002dc2c Server: parallel op sending 2025-03-31 16:05:09 +02:00
5c50d572d0 Server: no delay op sending 2025-03-31 16:05:00 +02:00
edebb6d8f0 Server: don't try downloading non-remote-objects 2025-03-31 16:04:31 +02:00
5d620c64c5 Server: fix utimens on directories (ignore it!) 2025-03-31 15:28:49 +02:00
b998871e7f Objects: make cache status print debug 2025-03-30 22:08:00 +02:00
69eb96b10c API draft to manually set peer addresses 2025-03-30 16:41:31 +02:00
8b3c0a6f2c Server: snapshot interface key fix 2025-03-29 12:29:28 +01:00
4a19f69c38 Revert "Server: proxying"
This reverts commit 6a8394852e.
2025-03-28 17:26:55 +01:00
8d40019f2c Revert "Server: hopefully working reverse proxy connection"
This reverts commit 312cf18b27.
2025-03-28 17:22:06 +01:00
312cf18b27 Server: hopefully working reverse proxy connection 2025-03-28 12:02:20 +01:00
6a8394852e Server: proxying 2025-03-27 21:42:21 +01:00
3e69e5dfb9 Update Quarkus to 3.20 2025-03-27 15:55:55 +01:00
8fdbaf5aa7 Objects: move objects to com.usatiuk.objects 2025-03-27 15:41:16 +01:00
4d44e3541b Server: move server outside objects 2025-03-27 15:34:59 +01:00
18d5a7f90e Server: move server impl to a separate bean 2025-03-27 13:56:45 +01:00
adcc5f464f Server: fix OOM when truncating large files
use the same chunk for zero fill
2025-03-27 12:23:13 +01:00
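"Use the same chunk for zero fill" points at sharing one zero-filled chunk instead of allocating a fresh buffer per chunk when a truncate grows a file; the per-chunk allocations are what could OOM on a large truncate. A hedged sketch of the idea (chunk size and types hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    final class ZeroFill {
        static final int CHUNK_SIZE = 1 << 20;                 // hypothetical 1 MiB chunks
        static final byte[] ZERO_CHUNK = new byte[CHUNK_SIZE]; // shared, treated as read-only

        // Growing a file by n chunks adds n references to the single shared chunk
        // rather than n fresh allocations.
        static List<byte[]> zeroChunks(int n) {
            List<byte[]> refs = new ArrayList<>(n);
            for (int i = 0; i < n; i++) refs.add(ZERO_CHUNK);
            return refs;
        }
    }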
d9ded36891 Utils: fix a race in hash set queue test 2025-03-27 12:13:04 +01:00
038b873364 Server: fix syncing when moving directories
as with log trimming we might be moving to folders that don't exist
2025-03-27 12:12:51 +01:00
8f7869d87a Server: 100 ops push 2025-03-24 23:04:44 +01:00
e0b4f97349 Server: tree op push ack (to motivate garbage collection) 2025-03-23 23:18:49 +01:00
035f64df5a Server: make DhfsFileServiceSimpleTestImpl abstract
so that IDEA doesn't try to run it
2025-03-23 22:36:15 +01:00
4c5fd91050 Server: some refcounting fixes 2025-03-23 22:32:32 +01:00
8559c9b984 Server: more logs and fix possible race when resyncing with more peers 2025-03-23 18:11:46 +01:00
e80e33568b Server: try to download all on start only when it's enabled, duh 2025-03-23 16:24:13 +01:00
03850d3522 Server: use linked list for autosync/sync lists 2025-03-23 16:05:29 +01:00
527395447c Server: fix autosync OOM 2025-03-23 16:04:19 +01:00
9108b27dd3 Auto-formatted field order fix 2025-03-23 15:34:01 +01:00
3bd0c4e2bb Remove server-old 2025-03-23 15:29:45 +01:00
c977b5f6c9 Run code cleanup 2025-03-23 15:29:32 +01:00
c5a875c27f Server: separate ref types
as we need to get the real "parent" object when sending some requests
2025-03-23 15:29:06 +01:00
ba6bb756bb Server: remove hackRefresh thing 2025-03-23 14:31:51 +01:00
a63e7e59b3 Utils: die on lock timeout 2025-03-23 14:27:36 +01:00
9a02a554a1 Server: hopefully working autosync 2025-03-23 14:27:20 +01:00
892e5ca9b7 run wrapper fixes 2025-03-23 14:03:18 +01:00
c12bff3ee7 Revert "CI: downgrade maven because of some locks bug"
This reverts commit 59a0b9a856.
2025-03-22 23:18:37 +01:00
59a0b9a856 CI: downgrade maven because of some locks bug 2025-03-22 23:15:09 +01:00
817d12a161 CI: parallel maven 2025-03-22 23:10:01 +01:00
258c257778 Server: increase manyFiles test timeout 2025-03-22 23:08:57 +01:00
b0bb9121e7 Server: re-enable parallel tests 2025-03-22 23:05:25 +01:00
a224c6bd51 Server: dumber deleteTest 2025-03-22 23:05:17 +01:00
13ecdd3106 Server: fix initial sync for normal objects, and fix ResyncIT 2025-03-22 22:53:33 +01:00
8a07f37566 Server: a bunch of fixes 2025-03-22 21:31:20 +01:00
dc0e73b1aa KleppmannTree: fix failed moves being recorded in op log 2025-03-22 14:57:01 +01:00
16eb1d28d9 Objects: add a "never lock option" to avoid deadlocks 2025-03-21 23:43:29 +01:00
4f397cd2d4 KleppmannTree: fix undo with rename 2025-03-21 23:31:59 +01:00
6a20550353 Server: better logging and (hopefully) fix ktree op forwarding messing up timestamps 2025-03-21 20:23:59 +01:00
92bca1e4e1 Server: fix peer sync not quite working with >2 nodes 2025-03-21 12:44:00 +01:00
4bfa93fca4 Server: handle PeerInfo conflicts (by ignoring them) 2025-03-20 22:26:28 +01:00
7d762c70fa Server: seemingly almost working file conflict resolution
though the removeAddHostTest doesn't seem to quite like it
2025-03-20 22:26:21 +01:00
20daa857e6 Server: slightly less broken peer removal 2025-03-17 23:36:34 +01:00
97c0f002fb Objects: don't run tx commit hooks inside the same transaction 2025-03-17 23:29:10 +01:00
f260bb0491 Server: improve run options 2025-03-17 08:44:11 +01:00
a2e75dbdc7 Server: some peer sync fixes 2025-03-16 20:07:45 +01:00
fa64dac9aa Server: peer connected/disconnected event listeners 2025-03-16 12:58:56 +01:00
8fbdf50732 Server: seemingly working file sync 2025-03-16 00:13:42 +01:00
be1f5d12c9 Server: fix logs in PeerManager
somehow it doesn't like the format of toString of some objects
2025-03-16 00:13:15 +01:00
1fd3b9e5e0 Server: don't always take tree lock in PeerInfoService 2025-03-16 00:12:45 +01:00
8499e20823 Server: fix for non-absolute WebUi paths 2025-03-16 00:11:52 +01:00
842bd49246 Objects: lmdb iterator find first fix 2025-03-16 00:08:05 +01:00
1b0af6e883 Objects: remove IterProdFn close() 2025-03-16 00:07:16 +01:00
667f8b3b42 Objects: a bunch of moving around 2025-03-14 22:39:54 +01:00
0aca2c5dbb Objects: move serializer stuff a little 2025-03-14 22:31:45 +01:00
223ba20418 Objects: run tx commit callbacks in empty transactions 2025-03-14 22:23:36 +01:00
ae17ab6ce9 Server: JMapHolder cleanup on delete 2025-03-14 16:22:22 +01:00
6e37320e7c Objects: seemingly more reasonably working tx hooks 2025-03-14 16:21:07 +01:00
d37dc944d0 Objects: remove findAllObjects 2025-03-14 15:03:11 +01:00
d483eba20d Objects: working cache for object reads without iterator 2025-03-14 00:01:21 +01:00
4cbb4ce2be Server: don't write-lock fs tree when opening files 2025-03-13 23:54:24 +01:00
5f85e944e3 Objects: fix read only transactions locking everything 2025-03-13 23:36:34 +01:00
4c90a74fea Server: don't keep file size separately 2025-03-13 23:10:43 +01:00
38ab6de85b Objects: fix tests not being run by maven 2025-03-13 21:15:53 +01:00
29fd2826a3 Objects: simplify snapshots 2025-03-13 19:35:19 +01:00
564 changed files with 13565 additions and 22163 deletions


@@ -20,26 +20,21 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Install sudo for ACT
run: apt-get update && apt-get install -y sudo
if: env.ACT=='true'
- name: Install fuse and maven
run: sudo apt-get update && sudo apt-get install -y libfuse2
- name: Install FUSE
run: sudo apt-get update && sudo apt-get install -y libfuse2 libfuse3-dev libfuse3-3 fuse3
- name: Download maven
run: |
cd "$HOME"
mkdir maven-bin
curl -s -L https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz | tar xvz --strip-components=1 -C maven-bin
echo "$HOME"/maven-bin/bin >> $GITHUB_PATH
- name: User allow other for fuse
run: echo "user_allow_other" | sudo tee -a /etc/fuse.conf
- name: Maven info
run: |
echo $GITHUB_PATH
echo $PATH
mvn -v
- name: Dump fuse.conf
run: cat /etc/fuse.conf
- name: Set up JDK 21
uses: actions/setup-java@v4
@@ -48,8 +43,11 @@ jobs:
distribution: "zulu"
cache: maven
- name: Build LazyFS
run: cd thirdparty/lazyfs/ && ./build.sh
- name: Test with Maven
run: cd dhfs-parent && mvn --batch-mode --update-snapshots package verify
run: cd dhfs-parent && mvn -T $(nproc) --batch-mode --update-snapshots package verify
# - name: Build with Maven
# run: cd dhfs-parent && mvn --batch-mode --update-snapshots package # -Dquarkus.log.category.\"com.usatiuk.dhfs\".min-level=DEBUG
@@ -57,7 +55,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: DHFS Server Package
path: dhfs-parent/server/target/quarkus-app
path: dhfs-parent/dhfs-app/target/quarkus-app
- uses: actions/upload-artifact@v4
if: ${{ always() }}
@@ -89,102 +87,6 @@ jobs:
name: Webui
path: webui/dist
build-native-libs:
strategy:
matrix:
include:
- os: ubuntu-latest
cross: "linux/amd64"
- os: ubuntu-latest
cross: "linux/arm64"
- os: macos-latest
runs-on: ${{ matrix.os }}
env:
DO_LOCAL_BUILD: ${{ matrix.os == 'macos-latest' }}
DOCKER_PLATFORM: ${{ matrix.cross || 'NATIVE' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set SANITIZED_DOCKER_PLATFORM
run: echo "SANITIZED_DOCKER_PLATFORM=$(echo $DOCKER_PLATFORM | tr / _ )" >> $GITHUB_ENV
- name: Set DOCKER_BUILDER_IMAGE
run: echo "DOCKER_BUILDER_IMAGE=dhfs_lib_builder-${{matrix.os}}-$SANITIZED_DOCKER_PLATFORM" >> $GITHUB_ENV
- name: Build config
run: |
echo DO_LOCAL_BUILD: $DO_LOCAL_BUILD
echo DOCKER_PLATFORM: $DOCKER_PLATFORM
echo SANITIZED_DOCKER_PLATFORM: $SANITIZED_DOCKER_PLATFORM
echo DOCKER_BUILDER_IMAGE: $DOCKER_BUILDER_IMAGE
- name: Set up JDK 21
if: ${{ env.DO_LOCAL_BUILD == 'TRUE' }}
uses: actions/setup-java@v4
with:
java-version: "21"
distribution: "zulu"
cache: maven
- name: Set up Docker Buildx
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/setup-qemu-action@v3
- name: Build Docker builder image
if: ${{ env.DO_LOCAL_BUILD != 'TRUE' }}
uses: docker/build-push-action@v5
with:
context: ./libdhfs_support/builder
file: ./libdhfs_support/builder/Dockerfile
push: false
platforms: ${{ env.DOCKER_PLATFORM }}
tags: ${{ env.DOCKER_BUILDER_IMAGE }}
cache-from: type=gha,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
cache-to: type=gha,mode=max,scope=build-${{ env.DOCKER_BUILDER_IMAGE }}
load: true
- name: Build the library
run: |
CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" libdhfs_support/builder/cross-build.sh both build "$(pwd)/result"
- name: Upload build
uses: actions/upload-artifact@v4
with:
name: NativeLib-${{ matrix.os }}-${{ env.SANITIZED_DOCKER_PLATFORM }}
path: result
merge-native-libs:
runs-on: ubuntu-latest
needs: [build-native-libs]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: downloaded-libs
- name: Merge all
run: rsync -av downloaded-libs/NativeLib*/* result/
- name: Check that libs exists
run: |
test -f "result/Linux-x86_64/libdhfs_support.so" || exit 1
- name: Upload
uses: actions/upload-artifact@v4
with:
name: NativeLibs
path: result
publish-docker:
runs-on: ubuntu-latest
permissions:
@@ -194,7 +96,7 @@ jobs:
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, merge-native-libs, build-dhfs]
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
@@ -212,12 +114,6 @@ jobs:
name: Webui
path: webui-dist-downloaded
- name: Download native libs
uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -293,7 +189,7 @@ jobs:
# with sigstore/fulcio when running outside of PRs.
id-token: write
needs: [build-webui, merge-native-libs, build-dhfs]
needs: [build-webui, build-dhfs]
steps:
- name: Checkout repository
@@ -309,11 +205,6 @@ jobs:
name: Webui
path: webui-dist-downloaded
- uses: actions/download-artifact@v4
with:
name: NativeLibs
path: dhfs-native-downloaded
- name: Show all the files
run: find .
@@ -321,14 +212,11 @@ jobs:
run: mkdir -p run-wrapper-out/dhfs/data && mkdir -p run-wrapper-out/dhfs/fuse && mkdir -p run-wrapper-out/dhfs/app
- name: Copy DHFS
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/DHFS Package"
run: cp -r ./dhfs-package-downloaded "run-wrapper-out/dhfs/app/Server"
- name: Copy Webui
run: cp -r ./webui-dist-downloaded "run-wrapper-out/dhfs/app/Webui"
- name: Copy Webui
run: cp -r ./dhfs-native-downloaded "run-wrapper-out/dhfs/app/NativeLibs"
- name: Copy run wrapper
run: cp -r ./run-wrapper/* "run-wrapper-out/dhfs/app/"

.gitmodules (vendored, new file)

@@ -0,0 +1,3 @@
[submodule "thirdparty/lazyfs/lazyfs"]
path = thirdparty/lazyfs/lazyfs
url = git@github.com:dsrhaslab/lazyfs.git


@@ -9,8 +9,6 @@ COPY ./dhfs-package-downloaded/*.jar .
COPY ./dhfs-package-downloaded/app .
COPY ./dhfs-package-downloaded/quarkus .
WORKDIR /usr/src/app/native-libs
COPY ./dhfs-native-downloaded/. .
WORKDIR /usr/src/app/webui
COPY ./webui-dist-downloaded/. .


@@ -14,6 +14,9 @@ Syncthing and allowing you to stream your files like Google Drive File Stream
This is a simple wrapper around the jar/web ui distribution that allows you to run/stop
the DHFS server in the background, and update itself (hopefully!)
## How to use it and how it works?
## How to use it?
TODO 😁
Unpack the run-wrapper and run the `run` script. The filesystem should be mounted to the `fuse` folder in the run-wrapper root directory.
Then, a web interface will be available at `localhost:8080`, which can be used to connect with other peers.


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main 2" type="QsApplicationConfigurationType" factoryName="QuarkusApplication">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
<module name="server" />
<option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+DebugNonSafepoints --enable-preview --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/2/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/2/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/2/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9020 -Dquarkus.http.ssl-port=9021 -Ddhfs.peerdiscovery.preset-uuid=22000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=11000000-0000-0000-0000-000000000000:127.0.0.1:9010:9011" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -1,11 +1,11 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Main" type="QsApplicationConfigurationType" factoryName="QuarkusApplication" nameIsGenerated="true">
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfs.Main" />
<module name="server" />
<option name="VM_PARAMETERS" value="--add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Dcom.usatiuk.dhfs.supportlib.native-path=$ProjectFileDir$/target/classes/native -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=false -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021" />
<option name="MAIN_CLASS_NAME" value="com.usatiuk.dhfsapp.Main" />
<module name="dhfs-app" />
<option name="VM_PARAMETERS" value="-XX:+UnlockDiagnosticVMOptions -XX:+UseParallelGC --enable-preview -XX:+DebugNonSafepoints --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-exports java.base/jdk.internal.access=ALL-UNNAMED -ea -Xmx2G -Ddhfs.webui.root=$ProjectFileDir$/../webui/dist -Ddhfs.fuse.root=${HOME}/dhfs_test/1/fuse -Ddhfs.objects.persistence.files.root=${HOME}/dhfs_test/1/data -Ddhfs.objects.persistence.stuff.root=${HOME}/dhfs_test/1/data/stuff -Ddhfs.objects.peerdiscovery.broadcast=true -Dquarkus.http.port=9010 -Dquarkus.http.ssl-port=9011 -Ddhfs.peerdiscovery.preset-uuid=11000000-0000-0000-0000-000000000000 -Ddhfs.peerdiscovery.static-peers=22000000-0000-0000-0000-000000000000:127.0.0.1:9020:9021 -Dquarkus.http.host=0.0.0.0" />
<extension name="coverage">
<pattern>
<option name="PATTERN" value="com.usatiuk.dhfs.*" />
<option name="PATTERN" value="com.usatiuk.dhfs.remoteobj.*" />
<option name="ENABLED" value="true" />
</pattern>
</extension>


@@ -1,60 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-deployment</artifactId>
<name>Autoprotomap - Deployment</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc-deployment</artifactId>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5-internal</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc-deployment</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -1,78 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.arc.deployment.GeneratedBeanBuildItem;
import io.quarkus.arc.deployment.GeneratedBeanGizmoAdaptor;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ApplicationIndexBuildItem;
import io.quarkus.gizmo.ClassCreator;
import io.quarkus.gizmo.SignatureBuilder;
import jakarta.inject.Singleton;
import org.jboss.jandex.ClassType;
import org.jboss.jandex.Type;
class AutoprotomapProcessor {
@BuildStep
ProtoIndexBuildItem index(ApplicationIndexBuildItem jandex) {
var ret = new ProtoIndexBuildItem();
var annot = jandex.getIndex().getAnnotations(ProtoMirror.class);
for (var a : annot) {
var protoTarget = jandex.getIndex().getClassByName(((ClassType) a.value().value()).name());
// if (!messageImplementors.contains(protoTarget))
// throw new IllegalArgumentException("Expected " + protoTarget + " to be a proto message");
System.out.println("Found: " + a.name().toString() + " at " + protoTarget.name().toString() + " of " + a.target().asClass().name().toString());
ret.protoMsgToObj.put(protoTarget, a.target().asClass());
}
return ret;
}
@BuildStep
void generateProtoSerializer(ApplicationIndexBuildItem jandex,
ProtoIndexBuildItem protoIndex,
BuildProducer<GeneratedBeanBuildItem> generatedClasses) {
try {
for (var o : protoIndex.protoMsgToObj.entrySet()) {
System.out.println("Generating " + o.getKey().toString() + " -> " + o.getValue().toString());
var gizmoAdapter = new GeneratedBeanGizmoAdaptor(generatedClasses);
var msgType = io.quarkus.gizmo.Type.classType(o.getKey().name());
var objType = io.quarkus.gizmo.Type.classType(o.getValue().name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var msgJType = Type.create(o.getKey().name(), Type.Kind.CLASS);
var objJType = Type.create(o.getValue().name(), Type.Kind.CLASS);
try (ClassCreator classCreator = ClassCreator.builder()
.className("com.usatiuk.autoprotomap.generated.for" + o.getKey().simpleName())
.signature(SignatureBuilder.forClass().addInterface(type))
.classOutput(gizmoAdapter)
.setFinal(true)
.build()) {
classCreator.addAnnotation(Singleton.class);
var generator = new ProtoSerializerGenerator(
jandex.getIndex(),
protoIndex,
classCreator,
msgJType,
objJType
);
generator.generate();
}
}
} catch (Throwable e) {
StringBuilder sb = new StringBuilder();
sb.append(e + "\n");
for (var el : e.getStackTrace()) {
sb.append(el.toString() + "\n");
}
System.out.println(sb);
}
}
}


@@ -1,18 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
public class Constants {
public static final String FIELD_PREFIX = "_";
public static String capitalize(String str) {
return str.substring(0, 1).toUpperCase() + str.substring(1);
}
public static String stripPrefix(String str, String prefix) {
if (str.startsWith(prefix)) {
return str.substring(prefix.length());
}
return str;
}
}


@@ -1,6 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
@FunctionalInterface
public interface Effect {
void apply();
}


@@ -1,10 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import io.quarkus.builder.item.SimpleBuildItem;
import org.apache.commons.collections4.BidiMap;
import org.apache.commons.collections4.bidimap.DualHashBidiMap;
import org.jboss.jandex.ClassInfo;
public final class ProtoIndexBuildItem extends SimpleBuildItem {
BidiMap<ClassInfo, ClassInfo> protoMsgToObj = new DualHashBidiMap<>();
}


@@ -1,342 +0,0 @@
package com.usatiuk.autoprotomap.deployment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.gizmo.*;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.jboss.jandex.Type;
import org.jboss.jandex.*;
import org.objectweb.asm.Opcodes;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.usatiuk.autoprotomap.deployment.Constants.*;
public class ProtoSerializerGenerator {
private final Index index;
private final ProtoIndexBuildItem protoIndex;
private final ClassCreator classCreator;
private final HashSet<Pair<ClassInfo, ClassInfo>> externalSerializers = new HashSet<>();
private final Type topMessageType;
private final Type topObjectType;
public ProtoSerializerGenerator(Index index, ProtoIndexBuildItem protoIndex, ClassCreator classCreator, Type topMessageType, Type topObjectType) {
this.index = index;
this.protoIndex = protoIndex;
this.classCreator = classCreator;
this.topMessageType = topMessageType;
this.topObjectType = topObjectType;
}
private FieldDescriptor getOutsideSerializer(ClassInfo messageClass, ClassInfo objectClass) {
var name = messageClass.name().withoutPackagePrefix() + objectClass.name().withoutPackagePrefix() + "serializer";
var msgType = io.quarkus.gizmo.Type.classType(messageClass.name());
var objType = io.quarkus.gizmo.Type.classType(objectClass.name());
var type = io.quarkus.gizmo.Type.ParameterizedType.parameterizedType(
io.quarkus.gizmo.Type.classType(ProtoSerializer.class),
msgType, objType);
var sig = SignatureBuilder.forField().setType(type).build();
var fd = FieldDescriptor.of(classCreator.getClassName(), name, ProtoSerializer.class);
if (externalSerializers.add(Pair.of(messageClass, objectClass))) {
var fc = classCreator.getFieldCreator(fd);
fc.addAnnotation(Inject.class);
fc.setSignature(sig);
fc.setModifiers(Opcodes.ACC_PUBLIC);
}
return fd;
}
private void traverseHierarchy(Index index, ClassInfo klass, Consumer<ClassInfo> visitor) {
var cur = klass;
while (true) {
visitor.accept(cur);
var next = cur.superClassType().name();
if (next.equals(DotName.OBJECT_NAME) || next.equals(DotName.RECORD_NAME)) break;
cur = index.getClassByName(next);
}
}
private ArrayList<FieldInfo> findAllFields(Index index, ClassInfo klass) {
ArrayList<FieldInfo> ret = new ArrayList<>();
traverseHierarchy(index, klass, cur -> {
ret.addAll(cur.fields());
});
return ret;
}
private void generateBuilderUse(BytecodeCreator bytecodeCreator,
ResultHandle builder,
Type messageType, Type objectType,
ResultHandle object) {
var builderType = Type.create(DotName.createComponentized(messageType.name(), "Builder", true), Type.Kind.CLASS);
var objectClass = index.getClassByName(objectType.name().toString());
Function<String, String> getterGetter = objectClass.isRecord()
? Function.identity()
: s -> "get" + capitalize(stripPrefix(s, FIELD_PREFIX));
for (var f : findAllFields(index, objectClass)) {
var consideredFieldName = stripPrefix(f.name(), FIELD_PREFIX);
Supplier<ResultHandle> get = () -> {
if ((f.flags() & Opcodes.ACC_PUBLIC) != 0)
return bytecodeCreator.readInstanceField(f, object);
else {
var fieldGetter = getterGetter.apply(f.name());
return bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(objectType.toString(), fieldGetter, f.type().name().toString()), object);
}
};
Effect doSimpleCopy = () -> {
var setter = MethodDescriptor.ofMethod(builderType.name().toString(), "set" + capitalize(consideredFieldName),
builderType.name().toString(), f.type().toString());
var val = get.get();
bytecodeCreator.invokeVirtualMethod(setter, builder, val);
};
switch (f.type().kind()) {
case CLASS -> {
if (f.type().equals(Type.create(String.class)) || f.type().equals(Type.create(ByteString.class))) {
doSimpleCopy.apply();
} else {
var builderGetter = "get" + capitalize(f.name()) + "Builder";
var protoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(f.type().name()));
var nestedBuilderType = Type.create(DotName.createComponentized(protoType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = bytecodeCreator.invokeVirtualMethod(
MethodDescriptor.ofMethod(builderType.toString(), builderGetter, nestedBuilderType.name().toString()), builder);
var val = get.get();
generateBuilderUse(bytecodeCreator, nestedBuilder, Type.create(protoType.name(), Type.Kind.CLASS), f.type(), val);
}
}
case PRIMITIVE -> {
doSimpleCopy.apply();
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + f.type());
}
}
}
private ResultHandle generateConstructorUse(
BytecodeCreator bytecodeCreator,
ClassCreator classCreator,
Type messageType, Type objectType,
ResultHandle message
) {
var constructor = findAllArgsConstructor(index, index.getClassByName(objectType.name()));
if (constructor == null) {
throw new IllegalStateException("No constructor found for type: " + objectType.name());
}
var argMap = new ResultHandle[constructor.parametersCount()];
for (int i = 0; i < argMap.length; i++) {
var type = constructor.parameterType(i);
var strippedName = stripPrefix(constructor.parameterName(i), FIELD_PREFIX);
IntConsumer doSimpleCopy = (arg) -> {
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
type.name().toString());
argMap[arg] = bytecodeCreator.invokeVirtualMethod(call, message);
};
switch (type.kind()) {
case CLASS -> {
if (type.equals(Type.create(String.class)) || type.equals(Type.create(ByteString.class))) {
doSimpleCopy.accept(i);
} else {
var nestedProtoType = protoIndex.protoMsgToObj.inverseBidiMap().get(index.getClassByName(type.name()));
var call = MethodDescriptor.ofMethod(messageType.name().toString(), "get" + capitalize(strippedName),
nestedProtoType.name().toString());
var nested = bytecodeCreator.invokeVirtualMethod(call, message);
argMap[i] = generateConstructorUse(bytecodeCreator, classCreator, Type.create(nestedProtoType.name(), Type.Kind.CLASS), type, nested);
}
}
case PRIMITIVE -> {
doSimpleCopy.accept(i);
}
case WILDCARD_TYPE -> throw new UnsupportedOperationException("Wildcards not supported yet");
case PARAMETERIZED_TYPE ->
throw new UnsupportedOperationException("Parametrized types not supported yet");
case ARRAY -> throw new UnsupportedOperationException("Arrays not supported yet");
default -> throw new IllegalStateException("Unexpected type: " + type);
}
}
return bytecodeCreator.newInstance(constructor, argMap);
}
private MethodInfo findAllArgsConstructor(Index index, ClassInfo klass) {
ArrayList<FieldInfo> fields = findAllFields(index, klass);
var fieldCount = fields.size();
var fieldNames = fields.stream().map(f -> stripPrefix(f.name(), FIELD_PREFIX)).sorted().toList();
var fieldNameToType = fields.stream().collect(Collectors.toMap(f -> stripPrefix(f.name(), FIELD_PREFIX), FieldInfo::type));
for (var m : klass.constructors()) {
if (m.parametersCount() != fieldCount) continue;
var parameterNames = m.parameters().stream().map(n -> stripPrefix(n.name(), FIELD_PREFIX)).sorted().toList();
if (!Objects.equals(fieldNames, parameterNames)) continue;
for (var p : m.parameters()) {
if (!Objects.equals(fieldNameToType.get(stripPrefix(p.name(), FIELD_PREFIX)), p.type())) continue;
}
return m;
}
return null;
}
public void generateAbstract() {
var kids = Stream.concat(index.getAllKnownSubclasses(topObjectType.name()).stream(),
index.getAllKnownImplementors(topObjectType.name()).stream())
.filter(k -> !k.isAbstract() && !k.isInterface()).toList();
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " serializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var statement = method.ifTrue(method.instanceOf(arg, nestedObjClass.name().toString()));
try (var branch = statement.trueBranch()) {
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
var serialized = branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"serialize", Message.class, Object.class),
serializerLoaded, arg);
branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"set" + capitalize(nestedObjType.name().withoutPackagePrefix()),
builderType.name().toString(), nestedMessageType.name().toString()), builder, serialized);
} else {
var nestedBuilderType = Type.create(DotName.createComponentized(nestedMessageType.name(), "Builder", true), Type.Kind.CLASS);
var nestedBuilder = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()) + "Builder",
nestedBuilderType.name().toString()), builder);
generateBuilderUse(branch, nestedBuilder, nestedMessageType, nestedObjType, arg);
}
var result = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
branch.returnValue(result);
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
for (var nestedObjClass : kids) {
System.out.println("Generating " + nestedObjClass.name() + " deserializer for " + topObjectType.name());
var nestedObjType = Type.create(nestedObjClass.name(), Type.Kind.CLASS);
var nestedMessageClass = protoIndex.protoMsgToObj.inverseBidiMap().get(nestedObjClass);
boolean doExternalCall = false;
if (nestedMessageClass == null) {
var msgInfo = index.getClassByName(topMessageType.name());
nestedMessageClass = index.getClassByName(msgInfo.method("get" + capitalize(nestedObjType.name().withoutPackagePrefix())).returnType().name());
doExternalCall = true;
}
var nestedMessageType = Type.create(nestedMessageClass.name(), Type.Kind.CLASS);
var typeCheck = method.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"has" + capitalize(nestedObjType.name().withoutPackagePrefix()), boolean.class), arg);
var statement = method.ifTrue(typeCheck);
try (var branch = statement.trueBranch()) {
var nestedMessage = branch.invokeVirtualMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(),
"get" + capitalize(nestedObjType.name().withoutPackagePrefix()), nestedMessageType.name().toString()), arg);
if (doExternalCall) {
var externalSerializer = getOutsideSerializer(nestedMessageClass, nestedObjClass);
var serializerLoaded = branch.readInstanceField(externalSerializer, branch.getThis());
branch.returnValue(branch.invokeInterfaceMethod(
MethodDescriptor.ofMethod(ProtoSerializer.class,
"deserialize", Object.class, Message.class),
serializerLoaded, nestedMessage));
} else {
branch.returnValue(generateConstructorUse(branch, classCreator, nestedMessageType, nestedObjType, nestedMessage));
}
}
}
method.throwException(IllegalArgumentException.class, "Unknown object type");
}
}
public void generate() {
var objInfo = index.getClassByName(topObjectType.name());
if (objInfo.isAbstract() || objInfo.isInterface()) {
generateAbstract();
return;
}
try (MethodCreator method = classCreator.getMethodCreator("serialize",
Message.class, Object.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var builderType = Type.create(DotName.createComponentized(topMessageType.name(), "Builder", true), Type.Kind.CLASS);
var builder = method.invokeStaticMethod(MethodDescriptor.ofMethod(topMessageType.name().toString(), "newBuilder", builderType.name().toString()));
var arg = method.getMethodParam(0);
generateBuilderUse(method, builder, topMessageType, topObjectType, arg);
var result = method.invokeVirtualMethod(MethodDescriptor.ofMethod(builderType.name().toString(), "build", topMessageType.name().toString()), builder);
method.returnValue(result);
}
try (MethodCreator method = classCreator.getMethodCreator("deserialize",
Object.class, Message.class)) {
method.setModifiers(Opcodes.ACC_PUBLIC);
var arg = method.getMethodParam(0);
method.returnValue(generateConstructorUse(method, classCreator, topMessageType, topObjectType, arg));
}
}
}


@@ -1,22 +0,0 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusDevModeTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapDevModeTest {
// Start hot reload (DevMode) test with your extension loaded
@RegisterExtension
static final QuarkusDevModeTest devModeTest = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnDevModeTest() {
// Write your dev mode tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-hot-reload for more information
Assertions.assertTrue(true, "Add dev mode assertions to " + getClass().getName());
}
}


@@ -1,22 +0,0 @@
package com.usatiuk.autoprotomap.test;
import io.quarkus.test.QuarkusUnitTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
public class AutoprotomapTest {
// Start unit test with your extension loaded
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class));
@Test
public void writeYourOwnUnitTest() {
// Write your unit tests here - see the testing extension guide https://quarkus.io/guides/writing-extensions#testing-extensions for more information
Assertions.assertTrue(true, "Add some assertions to " + getClass().getName());
}
}


@@ -1,107 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap-integration-tests</artifactId>
<name>Autoprotomap - Integration Tests</name>
<properties>
<skipITs>true</skipITs>
</properties>
<dependencies>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<systemPropertyVariables>
<native.image.path>${project.build.directory}/${project.build.finalName}-runner
</native.image.path>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
<maven.home>${maven.home}</maven.home>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>native-image</id>
<activation>
<property>
<name>native</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>${native.surefire.skip}</skipTests>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<skipITs>false</skipITs>
<quarkus.native.enabled>true</quarkus.native.enabled>
</properties>
</profile>
</profiles>
</project>


@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(AbstractProto.class)
public abstract class AbstractObject {
}


@@ -1,10 +0,0 @@
package com.usatiuk.autoprotomap.it;
import lombok.AllArgsConstructor;
import lombok.Getter;
@AllArgsConstructor
@Getter
public class CustomObject extends AbstractObject {
public int testNum = 0;
}


@@ -1,17 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import jakarta.inject.Singleton;
@Singleton
public class CustomObjectSerializer implements ProtoSerializer<CustomObjectProto, CustomObject> {
@Override
public CustomObject deserialize(CustomObjectProto message) {
return new CustomObject(2);
}
@Override
public CustomObjectProto serialize(CustomObject object) {
return CustomObjectProto.newBuilder().setTest(1).build();
}
}


@@ -1,8 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(InterfaceObjectProto.class)
public interface InterfaceObject {
String key();
}


@@ -1,15 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(NestedObjectProto.class)
@AllArgsConstructor
@Getter
public class NestedObject extends AbstractObject {
public SimpleObject object;
public String _nestedName;
public ByteString _nestedSomeBytes;
}


@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObjectProto.class)
public record RecordObject(String key) implements InterfaceObject {
}


@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
@ProtoMirror(RecordObject2Proto.class)
public record RecordObject2(String key, int value) implements InterfaceObject {
}

View File

@@ -1,15 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoMirror;
import lombok.AllArgsConstructor;
import lombok.Getter;
@ProtoMirror(SimpleObjectProto.class)
@AllArgsConstructor
@Getter
public class SimpleObject extends AbstractObject {
public int numfield = 0;
private String name;
public ByteString someBytes;
}

View File

@@ -1,47 +0,0 @@
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.usatiuk.autoprotomap.it";
option java_outer_classname = "TestProto";
package autoprotomap.test;
message SimpleObjectProto {
int32 numfield = 1;
string name = 2;
bytes someBytes = 3;
}
message NestedObjectProto {
SimpleObjectProto object = 1;
string nestedName = 2;
bytes nestedSomeBytes = 3;
}
message CustomObjectProto {
int64 test = 1;
}
message AbstractProto {
oneof obj {
NestedObjectProto nestedObject = 1;
SimpleObjectProto simpleObject = 2;
CustomObjectProto customObject = 3;
}
}
message RecordObjectProto {
string key = 1;
}
message RecordObject2Proto {
string key = 1;
int32 value = 2;
}
message InterfaceObjectProto {
oneof obj {
RecordObjectProto recordObject = 1;
RecordObject2Proto recordObject2 = 2;
}
}
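
The oneof wrappers in AbstractProto and InterfaceObjectProto are what let a single serializer dispatch over a whole class hierarchy. For the plain case, the mapping between SimpleObject and SimpleObjectProto comes down to field-by-field copying; here is a hand-written sketch of that mapping, for illustration only (the class the extension actually generates may differ):

package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoSerializer;

// Hand-written sketch of the SimpleObject <-> SimpleObjectProto mapping;
// shown for illustration, not meant to be registered as a bean.
public class SimpleObjectSerializerSketch implements ProtoSerializer<SimpleObjectProto, SimpleObject> {
    @Override
    public SimpleObject deserialize(SimpleObjectProto message) {
        return new SimpleObject(message.getNumfield(), message.getName(), message.getSomeBytes());
    }

    @Override
    public SimpleObjectProto serialize(SimpleObject object) {
        return SimpleObjectProto.newBuilder()
                .setNumfield(object.getNumfield())
                .setName(object.getName())
                .setSomeBytes(object.getSomeBytes())
                .build();
    }
}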

View File

@@ -1 +0,0 @@
quarkus.package.jar.decompiler.enabled=true

View File

@@ -1,7 +0,0 @@
package com.usatiuk.autoprotomap.it;
import io.quarkus.test.junit.QuarkusIntegrationTest;
@QuarkusIntegrationTest
public class AutoprotomapResourceIT extends AutoprotomapResourceTest {
}

View File

@@ -1,113 +0,0 @@
package com.usatiuk.autoprotomap.it;
import com.google.protobuf.ByteString;
import com.usatiuk.autoprotomap.runtime.ProtoSerializer;
import io.quarkus.test.junit.QuarkusTest;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
@QuarkusTest
public class AutoprotomapResourceTest {
@Inject
ProtoSerializer<SimpleObjectProto, SimpleObject> simpleProtoSerializer;
@Inject
ProtoSerializer<NestedObjectProto, NestedObject> nestedProtoSerializer;
@Inject
ProtoSerializer<AbstractProto, AbstractObject> abstractProtoSerializer;
@Inject
ProtoSerializer<InterfaceObjectProto, InterfaceObject> interfaceProtoSerializer;
@Test
public void testSimple() {
var ret = simpleProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getNumfield());
Assertions.assertEquals("simple test", ret.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSomeBytes());
var des = simpleProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testNested() {
var ret = nestedProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedSomeBytes());
var des = nestedProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testAbstractSimple() {
var ret = abstractProtoSerializer.serialize(new SimpleObject(1234, "simple test", ByteString.copyFrom(new byte[]{1, 2, 3})));
Assertions.assertEquals(1234, ret.getSimpleObject().getNumfield());
Assertions.assertEquals("simple test", ret.getSimpleObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getSimpleObject().getSomeBytes());
var des = (SimpleObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(1234, des.getNumfield());
Assertions.assertEquals("simple test", des.getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getSomeBytes());
}
@Test
public void testAbstractCustom() {
var ret = abstractProtoSerializer.serialize(new CustomObject(1234));
Assertions.assertEquals(1, ret.getCustomObject().getTest());
var des = (CustomObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(2, des.getTestNum());
}
@Test
public void testAbstractNested() {
var ret = abstractProtoSerializer.serialize(
new NestedObject(
new SimpleObject(333, "nested so", ByteString.copyFrom(new byte[]{1, 2, 3})),
"nested obj", ByteString.copyFrom(new byte[]{4, 5, 6})));
Assertions.assertEquals(333, ret.getNestedObject().getObject().getNumfield());
Assertions.assertEquals("nested so", ret.getNestedObject().getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), ret.getNestedObject().getObject().getSomeBytes());
Assertions.assertEquals("nested obj", ret.getNestedObject().getNestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), ret.getNestedObject().getNestedSomeBytes());
var des = (NestedObject) abstractProtoSerializer.deserialize(ret);
Assertions.assertEquals(333, des.object.numfield);
Assertions.assertEquals(333, des.getObject().getNumfield());
Assertions.assertEquals("nested so", des.getObject().getName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{1, 2, 3}), des.getObject().getSomeBytes());
Assertions.assertEquals("nested obj", des.get_nestedName());
Assertions.assertEquals(ByteString.copyFrom(new byte[]{4, 5, 6}), des.get_nestedSomeBytes());
}
@Test
public void testInterface() {
var ret = interfaceProtoSerializer.serialize(new RecordObject("record test"));
Assertions.assertEquals("record test", ret.getRecordObject().getKey());
var des = (RecordObject) interfaceProtoSerializer.deserialize(ret);
Assertions.assertEquals("record test", des.key());
var ret2 = interfaceProtoSerializer.serialize(new RecordObject2("record test 2", 1234));
Assertions.assertEquals("record test 2", ret2.getRecordObject2().getKey());
Assertions.assertEquals(1234, ret2.getRecordObject2().getValue());
var des2 = (RecordObject2) interfaceProtoSerializer.deserialize(ret2);
Assertions.assertEquals("record test 2", des2.key());
Assertions.assertEquals(1234, des2.value());
}
}

View File

@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Autoprotomap - Parent</name>
<modules>
<module>deployment</module>
<module>runtime</module>
<module>integration-tests</module>
</modules>
</project>

View File

@@ -1,63 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>autoprotomap</artifactId>
<name>Autoprotomap - Runtime</name>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>extension-descriptor</goal>
</goals>
<configuration>
<deployment>${project.groupId}:${project.artifactId}-deployment:${project.version}
</deployment>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<id>default-compile</id>
<configuration>
<annotationProcessorPaths>
<path>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-extension-processor</artifactId>
<version>${quarkus.platform.version}</version>
</path>
</annotationProcessorPaths>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,12 +0,0 @@
package com.usatiuk.autoprotomap.runtime;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @interface ProtoMirror {
Class<?> value() default Object.class;
}
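
Note the CLASS retention: the annotation survives into the bytecode, where build-time indexing (e.g. Jandex) can see it, but it is invisible to runtime reflection. A small illustration-only sketch of that behavior:

package com.usatiuk.autoprotomap.it;

import com.usatiuk.autoprotomap.runtime.ProtoMirror;

// Illustration only: with RetentionPolicy.CLASS, reflective lookup of the
// annotation at runtime returns null even though the class is annotated.
public class ProtoMirrorRetentionSketch {
    public static void main(String[] args) {
        ProtoMirror mirror = SimpleObject.class.getAnnotation(ProtoMirror.class);
        System.out.println(mirror); // null: CLASS retention is not visible to reflection
    }
}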

View File

@@ -1,9 +0,0 @@
name: Autoprotomap
#description: Do something useful.
metadata:
# keywords:
# - autoprotomap
# guide: ... # To create and publish this guide, see https://github.com/quarkiverse/quarkiverse/wiki#documenting-your-extension
# categories:
# - "miscellaneous"
# status: "preview"

View File

@@ -0,0 +1,172 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-app</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>utils</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>build</goal>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.dhfsapp;
import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;

View File

@@ -0,0 +1,35 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
dhfs.objects.periodic-push-op-interval=5m
dhfs.fuse.root=${HOME}/dhfs_default/fuse
dhfs.objects.persistence.stuff.root=${HOME}/dhfs_default/data/stuff
dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=524288
dhfs.files.max_chunk_size=524288
dhfs.files.target_chunk_alignment=17
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
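
Among the knobs above, dhfs.files.target_chunk_size=524288 is 512 KiB, and dhfs.files.target_chunk_alignment=17 reads naturally as a power-of-two exponent (2^17 bytes = 128 KiB); that interpretation is an assumption here, not something this diff confirms. A minimal sketch of the arithmetic under that assumption:

// Sketch only: assumes target_chunk_alignment is a power-of-two exponent.
public class ChunkMathSketch {
    public static void main(String[] args) {
        long targetChunkSize = 524288;             // dhfs.files.target_chunk_size (512 KiB)
        int alignmentExponent = 17;                // dhfs.files.target_chunk_alignment
        long alignment = 1L << alignmentExponent;  // 131072 bytes = 128 KiB

        // Under this reading, chunk boundaries are rounded down to 128 KiB
        // multiples, and one target-size chunk spans four aligned units.
        long offset = 300_000;
        long alignedStart = offset & ~(alignment - 1);
        System.out.println("aligned start: " + alignedStart);                    // 262144
        System.out.println("units per chunk: " + (targetChunkSize / alignment)); // 4
    }
}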

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs;
package com.usatiuk.dhfsapp;
import io.quarkus.test.junit.QuarkusTestProfile;
@@ -9,7 +9,8 @@ import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {}
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
@@ -21,7 +22,6 @@ abstract public class TempDataProfile implements QuarkusTestProfile {
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.objects.root", tempDirWithPrefix.resolve("dhfs_root_d_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;

View File

@@ -0,0 +1,44 @@
package com.usatiuk.dhfsapp;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;
import io.quarkus.runtime.StartupEvent;
import jakarta.annotation.Priority;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
@ApplicationScoped
public class TestDataCleaner {
@ConfigProperty(name = "dhfs.objects.persistence.files.root")
String tempDirectory;
public static void purgeDirectory(File dir) {
try {
for (File file : Objects.requireNonNull(dir.listFiles())) {
if (file.isDirectory())
purgeDirectory(file);
file.delete();
}
} catch (Exception e) {
Log.error("Couldn't purge directory " + dir, e);
}
}
void init(@Observes @Priority(1) StartupEvent event) throws IOException {
try {
purgeDirectory(Path.of(tempDirectory).toFile());
} catch (Exception ignored) {
Log.warn("Couldn't cleanup test data on init");
}
}
void shutdown(@Observes @Priority(1000000000) ShutdownEvent event) throws IOException {
purgeDirectory(Path.of(tempDirectory).toFile());
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsapp.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -32,9 +32,11 @@ public class DhfsFuseIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
Network network = Network.newNetwork();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
@@ -53,8 +55,8 @@ public class DhfsFuseIT {
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(DhfsFuseIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
@@ -65,131 +67,119 @@ public class DhfsFuseIT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
private void checkConsistency() {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/*/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.equals(ls2) && cat1.equals(cat2);
});
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void readWriteRewriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void createDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /root/dhfs_default/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo newfile > /dhfs_test/fuse/testf2").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() ->
"newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
"newfile\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
"newfile\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
}
@Test
void writeRewriteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
}
// TODO: How does this fit with the tree?
@Test
@Disabled
void deleteDelayedTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Delaying deletion check"), 60, TimeUnit.SECONDS, 1);
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
"rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void deleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
Log.info("Deleted");
// FIXME?
@@ -197,83 +187,105 @@ public class DhfsFuseIT {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container2.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
1 == container2.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /root/dhfs_default/fuse/testf1").getExitCode());
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void deleteTestKickedOut() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() ->
"tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("kicked"), 60, TimeUnit.SECONDS, 1);
Log.info("Deleting");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
Log.info("Deleted");
// FIXME?
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Deleting from persistent"), 60, TimeUnit.SECONDS, 3);
await().atMost(45, TimeUnit.SECONDS).until(() ->
1 == container1.execInContainer("/bin/sh", "-c", "test -f /dhfs_test/fuse/testf1").getExitCode());
}
@Test
void moveFileTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testf1 /root/dhfs_default/fuse/testf2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf2").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
}
@Test
void moveDirTest() throws IOException, InterruptedException, TimeoutException {
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testdir/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testdir/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/testdir2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/testdir /root/dhfs_default/fuse/testdir2/testdirm").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testdir2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testdir /dhfs_test/fuse/testdir2/testdirm").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testdir2/testdirm/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testdir2/testdirm/testf1").getStdout()));
}
// TODO: This probably shouldn't be working right now
@Test
void removeAddHostTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /root/dhfs_default/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /root/dhfs_default/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo jioadsd > /dhfs_test/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo asvdkljm > /dhfs_test/fuse/newfile1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /root/dhfs_default/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /root/dhfs_default/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo dfgvh > /dhfs_test/fuse/newfile2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo dscfg > /dhfs_test/fuse/newfile2").getExitCode());
Log.info("Re-adding");
container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing removeAddHostTest");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
Log.info(cat1);
Log.info(cat2);
Log.info(ls1);
@@ -286,10 +298,10 @@ public class DhfsFuseIT {
@Test
void dirConflictTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /root/dhfs_default/fuse/testf"),
Pair.of(container2, "echo test2 >> /root/dhfs_default/fuse/testf")).parallel().map(p -> {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
boolean createFail = Stream.of(Pair.of(container1, "echo test1 >> /dhfs_test/fuse/testf"),
Pair.of(container2, "echo test2 >> /dhfs_test/fuse/testf")).parallel().map(p -> {
try {
return p.getLeft().execInContainer("/bin/sh", "-c", p.getRight()).getExitCode();
} catch (Exception e) {
@@ -298,48 +310,75 @@ public class DhfsFuseIT {
}).anyMatch(r -> r != 0);
Assumptions.assumeTrue(!createFail, "Failed creating one or more files");
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls = container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
return cat.getStdout().contains("test1") && cat.getStdout().contains("test2");
});
}
@Test
void dirConflictTest2() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo fdsaio >> /dhfs_test/fuse/a/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a && echo exgrg >> /dhfs_test/fuse/a/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
Log.warn("Connected");
checkConsistency();
var ls1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/a*/*");
Assertions.assertTrue(ls1.getStdout().contains("fdsaio"));
Assertions.assertTrue(ls1.getStdout().contains("exgrg"));
}
@Test
void dirCycleTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/a").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /root/dhfs_default/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /root/dhfs_default/fuse/a/testfa").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /root/dhfs_default/fuse/b/testfb").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /root/dhfs_default/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/a").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo xqr489 >> /dhfs_test/fuse/a/testfa").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo ahinou >> /dhfs_test/fuse/b/testfb").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls -lavh /dhfs_test/fuse").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var c2ls = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f -exec cat {} \\;");
var c2ls = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f -exec cat {} \\;");
return c2ls.getExitCode() == 0 && c2ls.getStdout().contains("xqr489") && c2ls.getStdout().contains("ahinou");
});
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container1.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/a /root/dhfs_default/fuse/b").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/a /dhfs_test/fuse/b").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /root/dhfs_default/fuse/b /root/dhfs_default/fuse/a").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/b /dhfs_test/fuse/a").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing dirCycleTest");
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/a"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse/b"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
Log.info(container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/a"));
Log.info(container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/b"));
var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
var c1ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
Log.info(c1ls2);
var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -maxdepth 3 -type f -exec cat {} \\;");
var c2ls2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -maxdepth 3 -type f -exec cat {} \\;");
Log.info(c2ls2);
return c1ls2.getStdout().contains("xqr489") && c1ls2.getStdout().contains("ahinou")
@@ -349,4 +388,82 @@ public class DhfsFuseIT {
}
@Test
void removeAndMove() throws IOException, InterruptedException, TimeoutException {
var client = DockerClientFactory.instance().client();
Log.info("Creating");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
client.pauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
Log.info("Removing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/testf1").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 1);
Log.info("Moving");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testf1 /dhfs_test/fuse/testf2").getExitCode());
Log.info("Listing");
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/").getExitCode());
Log.info("Reading");
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf2").getStdout()));
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 1);
// Either removed, or moved
// TODO: it always seems to be removed?
Log.info("Reading both");
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse/");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("cat1: " + cat1);
Log.info("cat2: " + cat2);
Log.info("ls1: " + ls1);
Log.info("ls2: " + ls2);
if (!ls1.getStdout().equals(ls2.getStdout())) {
Log.info("Different ls?");
return false;
}
if (ls1.getStdout().trim().isEmpty() && ls2.getStdout().trim().isEmpty()) {
Log.info("Both empty");
return true;
}
if (!cat1.getStdout().equals(cat2.getStdout())) {
Log.info("Different cat?");
return false;
}
if (!(cat1.getExitCode() == 0 && cat2.getExitCode() == 0 && ls1.getExitCode() == 0 && ls2.getExitCode() == 0)) {
return false;
}
boolean hasMoved = cat1.getStdout().contains("tesempty") && cat2.getStdout().contains("tesempty")
&& ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
&& ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");
boolean removed = !cat1.getStdout().contains("tesempty") && !cat2.getStdout().contains("tesempty")
&& !ls1.getStdout().contains("testf2") && !ls1.getStdout().contains("testf1")
&& !ls2.getStdout().contains("testf2") && !ls2.getStdout().contains("testf1");
if (hasMoved && removed) {
Log.info("Both removed and moved");
return false;
}
return hasMoved || removed;
});
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsapp.integration;
import com.github.dockerjava.api.model.Device;
import io.quarkus.logging.Log;
@@ -35,13 +35,15 @@ public class DhfsFusex3IT {
String c2uuid;
String c3uuid;
Network network;
// This calculation is somewhat racy, so keep it hardcoded for now
long emptyFileCount = 9;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
// TODO: Dedup
Network network = Network.newNetwork();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -59,9 +61,9 @@ public class DhfsFusex3IT {
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::start);
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c3uuid = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Log.info(container1.getContainerId() + "=" + c1uuid);
Log.info(container2.getContainerId() + "=" + c2uuid);
@@ -91,26 +93,26 @@ public class DhfsFusex3IT {
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl1 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
var c2curl3 = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c3uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c3uuid);
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -119,8 +121,8 @@ public class DhfsFusex3IT {
private boolean checkEmpty() throws IOException, InterruptedException {
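// After deletions propagate, each node should be left with only the fixed set
// of bookkeeping objects counted by emptyFileCount.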
for (var container : List.of(container1, container2, container3)) {
var found = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f");
var foundWc = container.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/data/objs -type f | wc -l");
var found = container.execInContainer("/bin/sh", "-c", "find /dhfs_test/data/objs -type f");
var foundWc = container.execInContainer("/bin/sh", "-c", "find /dhfs_test/data/objs -type f | wc -l");
Log.info("Remaining objects in " + container.getContainerId() + ": " + found.toString() + " " + foundWc.toString());
if (!(found.getExitCode() == 0 && foundWc.getExitCode() == 0 && Integer.parseInt(foundWc.getStdout().strip()) == emptyFileCount))
return false;
@@ -131,48 +133,52 @@ public class DhfsFusex3IT {
@AfterEach
void stop() {
Stream.of(container1, container2, container3).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
// FIXME:
@Test
@Disabled
void largerFileDeleteTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /dhfs_test/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
}
@Test
@Disabled
void largerFileDeleteTestNoDelays() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && curl -O https://ash-speed.hetzner.com/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /root/dhfs_default/fuse/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /root/dhfs_default/fuse/100MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && dd if=/dev/urandom of=10MB.bin bs=1M count=10").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "head -c 10 /dhfs_test/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "rm /dhfs_test/fuse/10MB.bin").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> checkEmpty());
}
@Test
void gccHelloWorldTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /root/dhfs_default/fuse/hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /root/dhfs_default/fuse && gcc hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo '#include<stdio.h>\nint main(){printf(\"hello world\"); return 0;}' > /dhfs_test/fuse/hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "cd /dhfs_test/fuse && gcc hello.c").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container1.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container1.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container2.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container2.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var helloOut = container3.execInContainer("/bin/sh", "-c", "/root/dhfs_default/fuse/a.out");
var helloOut = container3.execInContainer("/bin/sh", "-c", "/dhfs_test/fuse/a.out");
Log.info(helloOut);
return helloOut.getExitCode() == 0 && helloOut.getStdout().equals("hello world");
});
@@ -180,20 +186,22 @@ public class DhfsFusex3IT {
@Test
void removeHostTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
var c3curl = container3.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request DELETE " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /root/dhfs_default/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
Thread.sleep(10000);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo rewritten > /dhfs_test/fuse/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "rewritten\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
@@ -203,15 +211,15 @@ public class DhfsFusex3IT {
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses are needed, as otherwise Docker buffers some incoming packets
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
@@ -220,8 +228,8 @@ public class DhfsFusex3IT {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
for (var c : List.of(container1, container2, container3)) {
var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3")))
@@ -231,49 +239,67 @@ public class DhfsFusex3IT {
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout());
return container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*").getStdout());
});
}
@Test
void fileConflictTest() throws IOException, InterruptedException, TimeoutException {
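// Make conflicting appends to the same file on three mutually isolated nodes,
// then reconnect them and expect identical listings and contents everywhere.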
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf").getStdout()));
var client = DockerClientFactory.instance().client();
client.pauseContainerCmd(container1.getContainerId()).exec();
client.pauseContainerCmd(container2.getContainerId()).exec();
// Pauses are needed, as otherwise Docker buffers some incoming packets
client.disconnectFromNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.disconnectFromNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container3.getContainerId()).exec();
client.unpauseContainerCmd(container2.getContainerId()).exec();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /root/dhfs_default/fuse/testf").getExitCode());
client.pauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container1.getContainerId()).exec();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS, 2);
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /root/dhfs_default/fuse/testf").getExitCode());
client.unpauseContainerCmd(container2.getContainerId()).exec();
client.unpauseContainerCmd(container3.getContainerId()).exec();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container3.execInContainer("/bin/sh", "-c", "echo test3 >> /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container2.execInContainer("/bin/sh", "-c", "echo test2 >> /dhfs_test/fuse/testf").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo test1 >> /dhfs_test/fuse/testf").getExitCode());
client.connectToNetworkCmd().withContainerId(container1.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container2.getContainerId()).withNetworkId(network.getId()).exec();
client.connectToNetworkCmd().withContainerId(container3.getContainerId()).withNetworkId(network.getId()).exec();
Log.warn("Waiting for connections");
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
waitingConsumer3.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS, 2);
Log.warn("Connected");
// TODO: There's some issue with the cache, so avoid file reads
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency 1");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
Log.info(ls1);
Log.info(ls2);
Log.info(ls3);
return (ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && ls3.getExitCode() == 0)
&& (ls1.getStdout().equals(ls2.getStdout()) && ls2.getStdout().equals(ls3.getStdout()));
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing");
for (var c : List.of(container1, container2, container3)) {
var ls = c.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*");
var ls = c.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat = c.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls);
Log.info(cat);
if (!(cat.getExitCode() == 0 && ls.getExitCode() == 0))
return false;
if (!(cat.getStdout().contains("test1") && cat.getStdout().contains("test2") && cat.getStdout().contains("test3")))
return false;
}
@@ -281,12 +307,24 @@ public class DhfsFusex3IT {
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
return container1.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "ls /root/dhfs_default/fuse").getStdout()) &&
container3.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout().equals(
container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/*").getStdout());
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls3 = container3.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat3 = container3.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
Log.info(ls3);
Log.info(cat3);
return (ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && ls3.getExitCode() == 0)
&& (cat1.getExitCode() == 0 && cat2.getExitCode() == 0 && cat3.getExitCode() == 0)
&& (cat1.getStdout().equals(cat2.getStdout()) && cat2.getStdout().equals(cat3.getStdout()))
&& (ls1.getStdout().equals(ls2.getStdout()) && ls2.getStdout().equals(ls3.getStdout()));
});
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsapp.integration;
import io.quarkus.logging.Log;
import org.jetbrains.annotations.NotNull;
@@ -66,24 +66,31 @@ public class DhfsImage implements Future<String> {
.run("apt update && apt install -y libfuse2 curl gcc")
.copy("/app", "/app")
.copy("/libs", "/libs")
.cmd("java", "-ea", "-Xmx128M",
.cmd("java", "-ea", "-Xmx256M", "-XX:TieredStopAtLevel=1", "-XX:+UseParallelGC",
"--add-exports", "java.base/sun.nio.ch=ALL-UNNAMED",
"--add-exports", "java.base/jdk.internal.access=ALL-UNNAMED",
"--add-opens=java.base/java.nio=ALL-UNNAMED",
"--enable-preview",
"-Ddhfs.objects.peerdiscovery.interval=1s",
"-Ddhfs.objects.invalidation.delay=100",
"-Ddhfs.objects.deletion.delay=0",
"-Ddhfs.objects.deletion.can-delete-retry-delay=1000",
"-Ddhfs.objects.ref_verification=true",
"-Ddhfs.objects.write_log=true",
"-Ddhfs.objects.sync.timeout=10",
"-Ddhfs.objects.sync.timeout=30",
"-Ddhfs.objects.sync.ping.timeout=5",
"-Ddhfs.objects.reconnect_interval=1s",
"-Dcom.usatiuk.dhfs.supportlib.native-path=/libs",
"-Ddhfs.objects.last-seen.timeout=30",
"-Ddhfs.objects.last-seen.update=10",
"-Ddhfs.sync.cert-check=false",
"-Dquarkus.log.category.\"com.usatiuk\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.dhfs\".level=TRACE",
"-Dquarkus.log.category.\"com.usatiuk.objects.transaction\".level=INFO",
"-Ddhfs.objects.periodic-push-op-interval=5s",
"-Ddhfs.fuse.root=/dhfs_test/fuse",
"-Ddhfs.objects.persistence.files.root=/dhfs_test/data",
"-Ddhfs.objects.persistence.stuff.root=/dhfs_test/data/stuff",
"-jar", "/app/quarkus-run.jar")
.run("mkdir /dhfs_test && chmod 777 /dhfs_test")
.build())
.withFileFromPath("/app", Paths.get(buildPath, "quarkus-app"))
.withFileFromPath("/libs", Paths.get(nativeLibsDirectory));

View File

@@ -0,0 +1,235 @@
package com.usatiuk.dhfsapp.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class KillIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
Network network;
ExecutorService executor;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("").toFile();
data2 = Files.createTempDirectory("").toFile();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data2);
executor.close();
network.close();
}
private void checkConsistency() {
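// Both mounts must eventually show identical listings and file contents, with
// all of the commands succeeding.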
await().atMost(45, TimeUnit.SECONDS).until(() -> {
Log.info("Listing consistency");
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info(ls1);
Log.info(cat1);
Log.info(ls2);
Log.info(cat2);
return ls1.getStdout().equals(ls2.getStdout()) && cat1.getStdout().equals(cat2.getStdout()) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@Test
void killTest(TestInfo testInfo) throws Exception {
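// Continuously append to a file on node 1, hard-kill its container mid-write,
// then restart it and check that both nodes converge to the same state.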
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs(TestInfo testInfo) throws Exception {
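// Variant of killTest that appends to a new file on every iteration, stressing
// directory-tree synchronization rather than a single file's contents.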
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTest2(TestInfo testInfo) throws Exception {
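// Same idea, but the killed node (container 2) is the one receiving the
// replicated writes; the writer is told to stop via a marker file first.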
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
@Test
void killTestDirs2(TestInfo testInfo) throws Exception {
var barrier = new CyclicBarrier(2);
var ret1 = executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.await();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(10000);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting");
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(KillIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency();
}
}

View File

@@ -0,0 +1,215 @@
package com.usatiuk.dhfsapp.integration;
import io.quarkus.logging.Log;
import java.io.*;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
public class LazyFs {
private static final String lazyFsPath;
static {
lazyFsPath = System.getProperty("lazyFsPath");
System.out.println("LazyFs Path: " + lazyFsPath);
}
private final String mountRoot;
private final String dataRoot;
private final String name;
private final File configFile;
private final File fifoFile;
private Thread errPiper;
private Thread outPiper;
private CountDownLatch startLatch;
private Process fs;
public LazyFs(String name, String mountRoot, String dataRoot) {
this.name = name;
this.mountRoot = mountRoot;
this.dataRoot = dataRoot;
try {
configFile = File.createTempFile("lazyfs", ".conf");
configFile.deleteOnExit();
fifoFile = new File("/tmp/" + ThreadLocalRandom.current().nextLong() + ".faultsfifo");
fifoFile.deleteOnExit();
} catch (IOException e) {
throw new RuntimeException(e);
}
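// Also unmount on JVM shutdown, so an aborted test run doesn't leave a stale
// FUSE mount behind.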
Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
}
private String fifoPath() {
return fifoFile.getAbsolutePath();
}
public void start(String extraOpts) {
var lfsPath = Path.of(lazyFsPath).resolve("build").resolve("lazyfs");
if (!lfsPath.toFile().isFile())
throw new IllegalStateException("LazyFs binary does not exist: " + lfsPath.toAbsolutePath());
if (!lfsPath.toFile().canExecute())
throw new IllegalStateException("LazyFs binary is not executable: " + lfsPath.toAbsolutePath());
try (var rwFile = new RandomAccessFile(configFile, "rw");
var channel = rwFile.getChannel()) {
channel.truncate(0);
var config = "[faults]\n" +
"fifo_path=\"" + fifoPath() + "\"\n" +
"[cache]\n" +
"apply_eviction=false\n" +
"[cache.simple]\n" +
"custom_size=\"1gb\"\n" +
"blocks_per_page=1\n" +
"[filesystem]\n" +
"log_all_operations=false\n" +
"logfile=\"\"\n" + extraOpts;
rwFile.write(config.getBytes());
Log.info("LazyFs config: \n" + config);
} catch (Exception e) {
throw new RuntimeException(e);
}
var argList = new ArrayList<String>();
argList.add(lfsPath.toString());
argList.add(Path.of(mountRoot).toString());
argList.add("--config-path");
argList.add(configFile.getAbsolutePath());
argList.add("-o");
argList.add("allow_other");
argList.add("-o");
argList.add("modules=subdir");
argList.add("-o");
argList.add("subdir=" + Path.of(dataRoot).toAbsolutePath().toString());
try {
Log.info("Starting LazyFs " + argList);
fs = Runtime.getRuntime().exec(argList.toArray(String[]::new));
} catch (Exception e) {
throw new RuntimeException(e);
}
startLatch = new CountDownLatch(1);
outPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getInputStream()))) {
String line;
while ((line = input.readLine()) != null) {
if (line.contains("running LazyFS"))
startLatch.countDown();
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs out piper finished");
});
outPiper.start();
errPiper = new Thread(() -> {
try {
try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.getErrorStream()))) {
String line;
while ((line = input.readLine()) != null) {
System.out.println(line);
}
}
} catch (Exception e) {
Log.info("Exception in LazyFs piper", e);
}
Log.info("LazyFs err piper finished");
});
errPiper.start();
try {
if (!startLatch.await(30, TimeUnit.SECONDS))
throw new RuntimeException("StartLatch timed out");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
Log.info("LazyFs started");
}
public void start() {
start("");
}
private String mdbPath() {
return Path.of(dataRoot).resolve("objects").resolve("data.mdb").toAbsolutePath().toString();
}
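// The two torn-write faults below follow LazyFs's fault-injection config:
// "torn-op" persists only some parts of a single write to the LMDB data file,
// while "torn-seq" persists only a subset of a sequence of writes; occurrence
// selects which write triggers the fault.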
public void startTornOp() {
start("\n" +
"[[injection]]\n" +
"type=\"torn-op\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"occurrence=3\n" +
"parts=3 #or parts_bytes=[4096,3600,1260]\n" +
"persist=[1,3]");
}
public void startTornSeq() {
start("\n" +
"[[injection]]\n" +
"type=\"torn-seq\"\n" +
"op=\"write\"\n" +
"file=\"" + mdbPath() + "\"\n" +
"persist=[1,4]\n" +
"occurrence=3");
}
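// Simulate a power failure: the command written to the faults FIFO makes LazyFs
// "crash" (discarding its unsynced cache) right after the next write operation.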
public void crash() {
try {
var cmd = "echo \"lazyfs::crash::timing=after::op=write::from_rgx=*\" > " + fifoPath();
Log.info("Running command: " + cmd);
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd}).waitFor();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void stop() {
try {
synchronized (this) {
Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + mountRoot}).waitFor();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Doesn't actually work?
//
// public void crashop() {
// try {
// var cmd = "echo \"lazyfs::torn-op::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,3::parts=3::occurrence=5\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
//
// public void crashseq() {
// try {
// var cmd = "echo \"lazyfs::torn-seq::op=write::file=" + Path.of(lazyFsDataPath).toAbsolutePath().toString() + "/objects/data.mdb::persist=1,4::occurrence=2\" > /tmp/faults.fifo";
// System.out.println("Running command: " + cmd);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", cmd});
// Thread.sleep(1000);
// Runtime.getRuntime().exec(new String[]{"/bin/sh", "-c", "fusermount3 -u " + dataRoot});
// Thread.sleep(1000);
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
}

View File

@@ -0,0 +1,489 @@
package com.usatiuk.dhfsapp.integration;
import com.github.dockerjava.api.model.Device;
import com.usatiuk.dhfsapp.TestDataCleaner;
import io.quarkus.logging.Log;
import org.junit.jupiter.api.*;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.LoggerFactory;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.Wait;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Stream;
import static org.awaitility.Awaitility.await;
public class LazyFsIT {
GenericContainer<?> container1;
GenericContainer<?> container2;
WaitingConsumer waitingConsumer1;
WaitingConsumer waitingConsumer2;
String c1uuid;
String c2uuid;
File data1;
File data2;
File data1Lazy;
File data2Lazy;
LazyFs lazyFs1;
LazyFs lazyFs2;
ExecutorService executor;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
executor = Executors.newCachedThreadPool();
data1 = Files.createTempDirectory("dhfsdata").toFile();
data2 = Files.createTempDirectory("dhfsdata").toFile();
data1Lazy = Files.createTempDirectory("lazyfsroot").toFile();
data2Lazy = Files.createTempDirectory("lazyfsroot").toFile();
network = Network.newNetwork();
lazyFs1 = new LazyFs(testInfo.getDisplayName(), data1.toString(), data1Lazy.toString());
lazyFs1.start();
lazyFs2 = new LazyFs(testInfo.getDisplayName(), data2.toString(), data2Lazy.toString());
lazyFs2.start();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data1.getAbsolutePath(), "/dhfs_test/data");
container2 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
.withCreateContainerCmdModifier(cmd -> Objects.requireNonNull(cmd.getHostConfig()).withDevices(Device.parse("/dev/fuse")))
.waitingFor(Wait.forLogMessage(".*Listening.*", 1).withStartupTimeout(Duration.ofSeconds(60))).withNetwork(network)
.withFileSystemBind(data2.getAbsolutePath(), "/dhfs_test/data");
Stream.of(container1, container2).parallel().forEach(GenericContainer::start);
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
}
@AfterEach
void stop() {
lazyFs1.stop();
lazyFs2.stop();
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
TestDataCleaner.purgeDirectory(data1);
TestDataCleaner.purgeDirectory(data1Lazy);
TestDataCleaner.purgeDirectory(data2);
TestDataCleaner.purgeDirectory(data2Lazy);
executor.close();
network.close();
}
private void checkConsistency(String testName) {
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var ls1 = container1.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat1 = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
var ls2 = container2.execInContainer("/bin/sh", "-c", "ls /dhfs_test/fuse");
var cat2 = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/*");
Log.info("Listing consistency " + testName + "\n"
+ ls1 + "\n"
+ cat1 + "\n"
+ ls2 + "\n"
+ cat2 + "\n");
return ls1.getStdout().equals(ls2.getStdout()) && cat1.getStdout().equals(cat2.getStdout()) && ls1.getExitCode() == 0 && ls2.getExitCode() == 0 && cat1.getExitCode() == 0 && cat2.getExitCode() == 0;
});
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest(CrashType crashType, TestInfo testInfo) throws Exception {
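// Crash-consistency test: write on node 1, force a LazyFs crash (dropping
// unsynced data), restart the container with the selected torn-write fault
// armed, write again, crash/tear again, then restart cleanly and verify both
// nodes converge.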
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs1.crash();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs1.start();
case TORN_OP -> lazyFs1.startTornOp();
case TORN_SEQ -> lazyFs1.startTornSeq();
}
container1.start();
waitingConsumer1 = new WaitingConsumer();
var loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while true; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
Thread.sleep(3000);
lazyFs1.crash();
}
try {
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container1.getContainerId()).exec();
container1.stop();
lazyFs1.stop();
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs1.start();
container1.start();
waitingConsumer1 = new WaitingConsumer();
loggingConsumer1 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("1-" + testInfo.getDisplayName());
container1.followOutput(loggingConsumer1.andThen(waitingConsumer1));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTest2(CrashType crashType, TestInfo testInfo) throws Exception {
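// As above, but the LazyFs crash happens on node 2 (the replication target)
// while node 1 keeps writing until told to stop via a marker file.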
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test1; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test2; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Log.info("Killing");
Thread.sleep(3000);
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
@ParameterizedTest
@EnumSource(CrashType.class)
void killTestDirs2(CrashType crashType, TestInfo testInfo) throws Exception {
var barrier = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting1 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier.await();
Thread.sleep(3000);
Log.info("Killing");
lazyFs2.crash();
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting1");
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
var client = DockerClientFactory.instance().client();
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
switch (crashType) {
case CRASH -> lazyFs2.start();
case TORN_OP -> lazyFs2.startTornOp();
case TORN_SEQ -> lazyFs2.startTornSeq();
}
container2.start();
waitingConsumer2 = new WaitingConsumer();
var loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
Log.info("Failed to connect: " + testInfo.getDisplayName());
// Sometimes it doesn't get mounted properly for some reason
Assumptions.assumeTrue(false);
}
var barrier2 = new CountDownLatch(1);
executor.submit(() -> {
try {
Log.info("Writing to container 1");
barrier2.countDown();
container1.execInContainer("/bin/sh", "-c", "counter=0; while [ ! -f /tmp/stopprinting2 ]; do counter=`expr $counter + 1`; echo $counter >> /dhfs_test/fuse/2test$counter; done");
} catch (Exception e) {
throw new RuntimeException(e);
}
});
barrier2.await();
Thread.sleep(3000);
Log.info("Killing");
if (crashType.equals(CrashType.CRASH)) {
lazyFs2.crash();
}
container1.execInContainer("/bin/sh", "-c", "touch /tmp/stopprinting2");
try {
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Caused by: org.lmdbjava"), 60, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// Sometimes crash doesn't work
Log.info("Failed to crash: " + testInfo.getDisplayName());
if (crashType.equals(CrashType.CRASH))
throw e;
Assumptions.assumeTrue(false);
}
client.killContainerCmd(container2.getContainerId()).exec();
container2.stop();
lazyFs2.stop();
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Lost connection to"), 60, TimeUnit.SECONDS);
Log.info("Restart");
lazyFs2.start();
container2.start();
waitingConsumer2 = new WaitingConsumer();
loggingConsumer2 = new Slf4jLogConsumer(LoggerFactory.getLogger(LazyFsIT.class)).withPrefix("2-" + testInfo.getDisplayName());
container2.followOutput(loggingConsumer2.andThen(waitingConsumer2));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
checkConsistency(testInfo.getDisplayName());
}
private enum CrashType {
CRASH,
TORN_OP,
TORN_SEQ
}
}
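The restart sequences in these tests re-attach fresh log consumers after every container.start(), since a WaitingConsumer is bound to one container lifecycle. A minimal sketch of that pattern, assuming the Testcontainers WaitingConsumer and Slf4jLogConsumer APIs exactly as used above (the helper name is hypothetical):

import org.slf4j.LoggerFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.output.Slf4jLogConsumer;
import org.testcontainers.containers.output.WaitingConsumer;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class LogWaitSketch {
    // Hypothetical helper: attach a new consumer pair to a (re)started container
    // and block until a marker line shows up in its output.
    static WaitingConsumer attachAndWait(GenericContainer<?> container, String prefix,
                                         String marker) throws TimeoutException {
        var waiting = new WaitingConsumer();
        var logging = new Slf4jLogConsumer(LoggerFactory.getLogger(LogWaitSketch.class))
                .withPrefix(prefix);
        container.followOutput(logging.andThen(waiting));
        waiting.waitUntil(frame -> frame.getUtf8String().contains(marker), 60, TimeUnit.SECONDS);
        return waiting;
    }
}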

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.integration;
package com.usatiuk.dhfsapp.integration;
import com.github.dockerjava.api.model.Device;
import org.junit.jupiter.api.*;
@@ -29,9 +29,11 @@ public class ResyncIT {
String c1uuid;
String c2uuid;
Network network;
@BeforeEach
void setup(TestInfo testInfo) throws IOException, InterruptedException, TimeoutException {
Network network = Network.newNetwork();
network = Network.newNetwork();
container1 = new GenericContainer<>(DhfsImage.getInstance())
.withPrivilegedMode(true)
@@ -55,81 +57,118 @@ public class ResyncIT {
@AfterEach
void stop() {
Stream.of(container1, container2).parallel().forEach(GenericContainer::stop);
network.close();
}
@Test
void readWriteFileTest() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /root/dhfs_default/fuse/testf1").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty > /dhfs_test/fuse/testf1").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/fuse/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testf1").getStdout()));
}
@Test
void manyFiles() throws IOException, InterruptedException, TimeoutException {
var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test$i; done");
var ret = container1.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test$i; done");
Assertions.assertEquals(0, ret.getExitCode());
var foundWc = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
var foundWc = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));
ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /root/dhfs_default/fuse/test-2-$i; done");
ret = container2.execInContainer("/bin/sh", "-c", "for i in $(seq 1 200); do echo $i > /dhfs_test/fuse/test-2-$i; done");
Assertions.assertEquals(0, ret.getExitCode());
foundWc = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
foundWc = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
Assertions.assertEquals(200, Integer.valueOf(foundWc.getStdout().strip()));
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /root/dhfs_default/data/stuff/self_uuid").getStdout();
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Ignoring new address"), 60, TimeUnit.SECONDS);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c2uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{\"uuid\":\"" + c1uuid + "\"}' " +
" http://localhost:8080/objects-manage/known-peers");
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container2.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
return 400 == Integer.valueOf(foundWc2.getStdout().strip());
});
await().atMost(45, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /root/dhfs_default/fuse -type f | wc -l");
await().atMost(120, TimeUnit.SECONDS).until(() -> {
var foundWc2 = container1.execInContainer("/bin/sh", "-c", "find /dhfs_test/fuse -type f | wc -l");
return 400 == Integer.valueOf(foundWc2.getStdout().strip());
});
}
@Test
void folderAfterMove() throws IOException, InterruptedException, TimeoutException {
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mkdir /dhfs_test/fuse/testd1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty1 > /dhfs_test/fuse/testd1/testf1").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "mv /dhfs_test/fuse/testd1 /dhfs_test/fuse/testd2").getExitCode());
await().atMost(45, TimeUnit.SECONDS).until(() -> 0 == container1.execInContainer("/bin/sh", "-c", "echo tesempty2 > /dhfs_test/fuse/testd2/testf2").getExitCode());
c1uuid = container1.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
c2uuid = container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/data/stuff/self_uuid").getStdout();
Assertions.assertDoesNotThrow(() -> UUID.fromString(c1uuid));
Assertions.assertDoesNotThrow(() -> UUID.fromString(c2uuid));
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("New address"), 60, TimeUnit.SECONDS);
var c1curl = container1.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c2uuid);
var c2curl = container2.execInContainer("/bin/sh", "-c",
"curl --header \"Content-Type: application/json\" " +
" --request PUT " +
" --data '{}' " +
" http://localhost:8080/peers-manage/known-peers/" + c1uuid);
waitingConsumer2.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
waitingConsumer1.waitUntil(frame -> frame.getUtf8String().contains("Connected"), 60, TimeUnit.SECONDS);
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty1\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf1").getStdout()));
await().atMost(45, TimeUnit.SECONDS).until(() -> "tesempty2\n".equals(container2.execInContainer("/bin/sh", "-c", "cat /dhfs_test/fuse/testd2/testf2").getStdout()));
}
}
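Every ResyncIT case repeats the same pairing handshake: read each node's self_uuid, then PUT an empty JSON body to /peers-manage/known-peers/{uuid} on the other node. A sketch of that step factored into a helper, under the same container setup as above (the method name is hypothetical):

import org.testcontainers.containers.Container;
import org.testcontainers.containers.GenericContainer;

class PeerPairingSketch {
    // Hypothetical helper: register peerUuid as a known peer on this container
    // through the management endpoint the tests above call via curl.
    static void addKnownPeer(GenericContainer<?> container, String peerUuid) throws Exception {
        Container.ExecResult res = container.execInContainer("/bin/sh", "-c",
                "curl --header \"Content-Type: application/json\"" +
                        " --request PUT" +
                        " --data '{}'" +
                        " http://localhost:8080/peers-manage/known-peers/" + peerUuid);
        if (res.getExitCode() != 0)
            throw new IllegalStateException("Peer registration failed: " + res.getStderr());
    }
}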

View File

@@ -5,7 +5,6 @@ dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false

View File

@@ -31,6 +31,7 @@ services:
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0

164
dhfs-parent/dhfs-fs/pom.xml Normal file
View File

@@ -0,0 +1,164 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-security</artifactId>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-grpc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-arc</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-client-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-rest-jsonb</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-scheduler</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.jboss.slf4j</groupId>
<artifactId>slf4j-jboss-logmanager</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>sync-base</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
true
</junit.jupiter.execution.parallel.enabled>
<junit.jupiter.execution.parallel.mode.default>
concurrent
</junit.jupiter.execution.parallel.mode.default>
<junit.jupiter.execution.parallel.config.dynamic.factor>
0.5
</junit.jupiter.execution.parallel.config.dynamic.factor>
<junit.platform.output.capture.stdout>true</junit.platform.output.capture.stdout>
<junit.platform.output.capture.stderr>true</junit.platform.output.capture.stderr>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>
<groupId>${quarkus.platform.group-id}</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.platform.version}</version>
<extensions>true</extensions>
<executions>
<execution>
<id>quarkus-plugin</id>
<goals>
<goal>generate-code</goal>
<goal>generate-code-tests</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,13 @@
package com.usatiuk.dhfsfs.objects;
import com.google.protobuf.ByteString;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
public record ChunkData(JObjectKey key, ByteString data) implements JDataRemote, JDataRemoteDto {
@Override
public int estimateSize() {
return data.size();
}
}

View File

@@ -0,0 +1,26 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.ChunkDataP;
import com.usatiuk.dhfs.persistence.JObjectKeyP;
import com.usatiuk.objects.JObjectKey;
import jakarta.inject.Singleton;
@Singleton
public class ChunkDataProtoSerializer implements ProtoSerializer<ChunkDataP, ChunkData> {
@Override
public ChunkData deserialize(ChunkDataP message) {
return new ChunkData(
JObjectKey.of(message.getKey().getName()),
message.getData()
);
}
@Override
public ChunkDataP serialize(ChunkData object) {
return ChunkDataP.newBuilder()
.setKey(JObjectKeyP.newBuilder().setName(object.key().value()).build())
.setData(object.data())
.build();
}
}
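Both directions of this serializer are plain field copies, so serialize-then-deserialize should reproduce the record. A minimal same-package sketch, assuming JObjectKey.of(name).value() returns name as the code above implies:

import com.google.protobuf.ByteString;
import com.usatiuk.objects.JObjectKey;

class ChunkDataRoundTripSketch {
    public static void main(String[] args) {
        var serializer = new ChunkDataProtoSerializer();
        // Records compare by value and ByteString compares content,
        // so the round trip should yield an equal ChunkData.
        var chunk = new ChunkData(JObjectKey.of("chunk-1"), ByteString.copyFromUtf8("hello"));
        assert serializer.deserialize(serializer.serialize(chunk)).equals(chunk);
    }
}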

View File

@@ -0,0 +1,51 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHolder;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.Set;
public record File(JObjectKey key, long mode, long cTime, long mTime,
boolean symlink
) implements JDataRemote, JMapHolder<JMapLongKey> {
public File withSymlink(boolean symlink) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMode(long mode) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withCTime(long cTime) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withMTime(long mTime) {
return new File(key, mode, cTime, mTime, symlink);
}
public File withCurrentMTime() {
return new File(key, mode, cTime, System.currentTimeMillis(), symlink);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return Set.of();
// return Set.copyOf(chunks().values());
}
@Override
public int estimateSize() {
return 64;
// return chunks.size() * 64;
}
@Override
public Class<? extends JDataRemoteDto> dtoClass() {
return FileDto.class;
}
}
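File is an immutable record, so every update goes through a with-er that rebuilds the value; callers chain them and re-put the result. A small usage sketch (key and mode values are illustrative):

import com.usatiuk.objects.JObjectKey;

class FileWitherSketch {
    public static void main(String[] args) {
        var f = new File(JObjectKey.of("file-1"), 0644, 0L, 0L, false);
        // Each with-er returns a new File; the original stays untouched,
        // which keeps values safe to share across transactions.
        var updated = f.withMode(0600).withCurrentMTime();
        assert f.mode() == 0644 && updated.mode() == 0600;
    }
}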

View File

@@ -0,0 +1,15 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.JDataRemoteDto;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
public record FileDto(File file, List<Pair<Long, JObjectKey>> chunks) implements JDataRemoteDto {
@Override
public Class<? extends JDataRemote> objClass() {
return File.class;
}
}

View File

@@ -0,0 +1,24 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.syncmap.DtoMapper;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class FileDtoMapper implements DtoMapper<File, FileDto> {
@Inject
JMapHelper jMapHelper;
@Inject
FileHelper fileHelper;
@Override
public FileDto toDto(File obj) {
return new FileDto(obj, fileHelper.getChunks(obj));
}
@Override
public File fromDto(FileDto dto) {
throw new UnsupportedOperationException();
}
}

View File

@@ -0,0 +1,36 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.objects.JObjectKey;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;
@ApplicationScoped
public class FileHelper {
@Inject
JMapHelper jMapHelper;
public List<Pair<Long, JObjectKey>> getChunks(File file) {
ArrayList<Pair<Long, JObjectKey>> chunks = new ArrayList<>();
try (var it = jMapHelper.getIterator(file)) {
while (it.hasNext()) {
var cur = it.next();
chunks.add(Pair.of(cur.getKey().key(), cur.getValue().ref()));
}
}
return List.copyOf(chunks);
}
public void replaceChunks(File file, List<Pair<Long, JObjectKey>> chunks) {
jMapHelper.deleteAll(file);
for (var f : chunks) {
jMapHelper.put(file, JMapLongKey.of(f.getLeft()), f.getRight());
}
}
}
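getChunks and replaceChunks together define the chunk-table contract: replaceChunks rewrites the whole mapping, so a following getChunks returns exactly the inserted list. A sketch of that round trip, assuming an injected FileHelper and an active transaction (CDI wiring omitted):

import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;

import java.util.List;

class FileHelperContractSketch {
    // Offsets and keys are illustrative; Pair.of(offset, chunkKey) mirrors the
    // (start offset, chunk id) entries produced by getChunks above.
    static void roundTrip(FileHelper fileHelper, File file) {
        List<Pair<Long, JObjectKey>> chunks = List.of(
                Pair.of(0L, JObjectKey.of("chunk-a")),
                Pair.of(524288L, JObjectKey.of("chunk-b")));
        fileHelper.replaceChunks(file, chunks);
        assert fileHelper.getChunks(file).equals(chunks);
    }
}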

View File

@@ -0,0 +1,25 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.ProtoSerializer;
import com.usatiuk.dhfs.persistence.FileDtoP;
import com.usatiuk.utils.SerializationHelper;
import jakarta.inject.Singleton;
import java.io.IOException;
@Singleton
public class FileProtoSerializer implements ProtoSerializer<FileDtoP, FileDto> {
@Override
public FileDto deserialize(FileDtoP message) {
try (var is = message.getSerializedData().newInput()) {
return SerializationHelper.deserialize(is);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public FileDtoP serialize(FileDto object) {
return FileDtoP.newBuilder().setSerializedData(SerializationHelper.serialize(object)).build();
}
}

View File

@@ -0,0 +1,236 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.peersync.PeerId;
import com.usatiuk.dhfs.peersync.PersistentPeerDataService;
import com.usatiuk.dhfs.remoteobj.*;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.pcollections.HashPMap;
import org.pcollections.HashTreePMap;
import org.pcollections.PMap;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
@ApplicationScoped
public class FileSyncHandler implements ObjSyncHandler<File, FileDto> {
@Inject
Transaction curTx;
@Inject
PersistentPeerDataService persistentPeerDataService;
@Inject
JMapHelper jMapHelper;
@Inject
RemoteTransaction remoteTx;
@Inject
FileHelper fileHelper;
@Inject
JKleppmannTreeManager jKleppmannTreeManager;
@Inject
DhfsFileService fileService;
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs")).orElseThrow();
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC).orElseThrow();
}
private void resolveConflict(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var oursCurMeta = curTx.get(RemoteObjectMeta.class, key).orElseThrow(() -> new IllegalStateException("Conflict resolution for unknown object: " + key));
if (!oursCurMeta.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + oursCurMeta.knownType() + " vs " + File.class);
if (!oursCurMeta.knownType().equals(File.class))
oursCurMeta = oursCurMeta.withKnownType(File.class);
curTx.put(oursCurMeta);
var oursCurFile = remoteTx.getDataLocal(File.class, key).orElse(null);
if (oursCurFile == null)
throw new StatusRuntimeException(Status.ABORTED.withDescription("Conflict but we don't have local copy"));
var theirsFile = receivedData.file();
var oursChunks = fileHelper.getChunks(oursCurFile);
File first;
File second;
List<Pair<Long, JObjectKey>> firstChunks;
List<Pair<Long, JObjectKey>> secondChunks;
PeerId otherHostname;
if (oursCurFile.mTime() >= theirsFile.mTime()) {
first = oursCurFile;
firstChunks = oursChunks;
second = theirsFile;
secondChunks = receivedData.chunks();
otherHostname = from;
} else {
second = oursCurFile;
secondChunks = oursChunks;
first = theirsFile;
firstChunks = receivedData.chunks();
otherHostname = persistentPeerDataService.getSelfUuid();
}
Log.tracev("Conflict resolution: ours: {0}, theirs: {1}, chunks: {2}, {3}", oursCurFile, theirsFile, oursChunks, receivedData.chunks());
Log.tracev("Conflict resolution: first: {0}, second: {1}, chunks: {2}, {3}", first, second, firstChunks, secondChunks);
HashPMap<PeerId, Long> newChangelog = HashTreePMap.from(oursCurMeta.changelog());
for (var entry : receivedChangelog.entrySet()) {
newChangelog = newChangelog.plus(entry.getKey(),
Long.max(newChangelog.getOrDefault(entry.getKey(), 0L), entry.getValue())
);
}
oursCurMeta = oursCurMeta.withChangelog(newChangelog);
curTx.put(oursCurMeta);
boolean chunksDiff = !Objects.equals(firstChunks, secondChunks);
boolean wasChanged = first.mTime() != second.mTime()
|| first.cTime() != second.cTime()
|| first.mode() != second.mode()
|| first.symlink() != second.symlink()
|| chunksDiff;
if (wasChanged) {
oursCurMeta = oursCurMeta.withChangelog(
newChangelog.plus(persistentPeerDataService.getSelfUuid(), newChangelog.getOrDefault(persistentPeerDataService.getSelfUuid(), 0L) + 1)
);
curTx.put(oursCurMeta);
remoteTx.putDataRaw(oursCurFile.withCTime(first.cTime()).withMTime(first.mTime()).withMode(first.mode()).withSymlink(first.symlink()));
fileHelper.replaceChunks(oursCurFile, firstChunks);
var newFile = new File(JObjectKey.random(), second.mode(), second.cTime(), second.mTime(), second.symlink());
remoteTx.putData(newFile);
fileHelper.replaceChunks(newFile, secondChunks);
var parent = fileService.inoToParent(oursCurFile.key());
int i = 0;
do {
try {
getTreeW().move(parent.getRight(),
new JKleppmannTreeNodeMetaFile(
parent.getLeft() + ".fconflict." + persistentPeerDataService.getSelfUuid() + "." + otherHostname.toString() + "." + i,
newFile.key()
),
getTreeW().getNewNodeId()
);
} catch (AlreadyExistsException aex) {
i++;
continue;
}
break;
} while (true);
}
var curKnownRemoteVersion = oursCurMeta.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
oursCurMeta = oursCurMeta.withKnownRemoteVersions(oursCurMeta.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(oursCurMeta);
}
}
@Override
public void handleRemoteUpdate(PeerId from, JObjectKey key, PMap<PeerId, Long> receivedChangelog,
@Nullable FileDto receivedData) {
var current = curTx.get(RemoteObjectMeta.class, key).orElse(null);
if (current == null) {
current = new RemoteObjectMeta(key, HashTreePMap.empty());
curTx.put(current);
}
var changelogCompare = SyncHelper.compareChangelogs(current.changelog(), receivedChangelog);
switch (changelogCompare) {
case EQUAL -> {
Log.debug("No action on update: " + key + " from " + from);
if (!current.hasLocalData() && receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
}
}
case NEWER -> {
Log.debug("Received newer index update than known: " + key + " from " + from);
var newChangelog = receivedChangelog.containsKey(persistentPeerDataService.getSelfUuid()) ?
receivedChangelog : receivedChangelog.plus(persistentPeerDataService.getSelfUuid(), 0L);
current = current.withChangelog(newChangelog);
if (receivedData != null) {
current = current.withHaveLocal(true);
curTx.put(current);
curTx.put(curTx.get(RemoteObjectDataWrapper.class, RemoteObjectMeta.ofDataKey(current.key()))
.map(w -> w.withData(receivedData.file())).orElse(new RemoteObjectDataWrapper<>(receivedData.file())));
if (!current.knownType().isAssignableFrom(File.class))
throw new IllegalStateException("Object type mismatch: " + current.knownType() + " vs " + File.class);
if (!current.knownType().equals(File.class))
current = current.withKnownType(File.class);
curTx.put(current);
fileHelper.replaceChunks(receivedData.file(), receivedData.chunks());
} else {
current = current.withHaveLocal(false);
curTx.put(current);
}
}
case OLDER -> {
Log.debug("Received older index update than known: " + key + " from " + from);
return;
}
case CONFLICT -> {
Log.debug("Conflict on update (inconsistent version): " + key + " from " + from);
assert receivedData != null;
resolveConflict(from, key, receivedChangelog, receivedData);
// TODO:
return;
}
}
var curKnownRemoteVersion = current.knownRemoteVersions().get(from);
var receivedTotalVer = receivedChangelog.values().stream().mapToLong(Long::longValue).sum();
if (curKnownRemoteVersion == null || curKnownRemoteVersion < receivedTotalVer) {
current = current.withKnownRemoteVersions(current.knownRemoteVersions().plus(from, receivedTotalVer));
curTx.put(current);
}
}
}
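handleRemoteUpdate branches on SyncHelper.compareChangelogs, which behaves like a version-vector comparison over per-peer counters. An illustrative reimplementation of that comparison, not the actual SyncHelper:

import com.usatiuk.dhfs.peersync.PeerId;
import org.pcollections.PMap;

import java.util.HashSet;

class ChangelogCompareSketch {
    enum Result {EQUAL, NEWER, OLDER, CONFLICT}

    // NEWER: the received changelog dominates ours; OLDER: the reverse;
    // CONFLICT: each side is ahead for at least one peer. Sketch only.
    static Result compare(PMap<PeerId, Long> ours, PMap<PeerId, Long> theirs) {
        boolean oursAhead = false, theirsAhead = false;
        var keys = new HashSet<PeerId>();
        keys.addAll(ours.keySet());
        keys.addAll(theirs.keySet());
        for (var k : keys) {
            long o = ours.getOrDefault(k, 0L), t = theirs.getOrDefault(k, 0L);
            if (o > t) oursAhead = true;
            if (t > o) theirsAhead = true;
        }
        if (oursAhead && theirsAhead) return Result.CONFLICT;
        if (theirsAhead) return Result.NEWER;
        if (oursAhead) return Result.OLDER;
        return Result.EQUAL;
    }
}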

View File

@@ -0,0 +1,18 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
public record JKleppmannTreeNodeMetaDirectory(String name) implements JKleppmannTreeNodeMeta {
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaDirectory(name);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of();
}
}

View File

@@ -0,0 +1,19 @@
package com.usatiuk.dhfsfs.objects;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.objects.JObjectKey;
import java.util.Collection;
import java.util.List;
public record JKleppmannTreeNodeMetaFile(String name, JObjectKey fileIno) implements JKleppmannTreeNodeMeta {
@Override
public JKleppmannTreeNodeMeta withName(String name) {
return new JKleppmannTreeNodeMetaFile(name, fileIno);
}
@Override
public Collection<JObjectKey> collectRefsTo() {
return List.of(fileIno);
}
}

View File

@@ -1,9 +1,8 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.objects.JObjectKey;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Optional;
@@ -29,11 +28,9 @@ public interface DhfsFileService {
Iterable<String> readDir(String name);
void updateFileSize(File file);
long size(JObjectKey fileUuid);
Long size(JObjectKey f);
Optional<ByteString> read(JObjectKey fileUuid, long offset, int length);
ByteString read(JObjectKey fileUuid, long offset, int length);
Long write(JObjectKey fileUuid, long offset, ByteString data);

View File

@@ -1,22 +1,28 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.files.objects.ChunkData;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.objects.*;
import com.usatiuk.dhfs.objects.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfs.objects.jkleppmanntree.structs.JKleppmannTreeNodeMetaFile;
import com.usatiuk.dhfs.objects.jmap.JMapEntry;
import com.usatiuk.dhfs.objects.jmap.JMapHelper;
import com.usatiuk.dhfs.objects.jmap.JMapLongKey;
import com.usatiuk.dhfs.objects.persistence.IteratorStart;
import com.usatiuk.dhfs.objects.transaction.LockingStrategy;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.utils.StatusRuntimeExceptionNoStacktrace;
import com.usatiuk.dhfs.jkleppmanntree.JKleppmannTreeManager;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNode;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeHolder;
import com.usatiuk.dhfs.jkleppmanntree.structs.JKleppmannTreeNodeMeta;
import com.usatiuk.dhfs.jmap.JMapEntry;
import com.usatiuk.dhfs.jmap.JMapHelper;
import com.usatiuk.dhfs.jmap.JMapLongKey;
import com.usatiuk.dhfs.remoteobj.JDataRemote;
import com.usatiuk.dhfs.remoteobj.RemoteObjectMeta;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.ChunkData;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaDirectory;
import com.usatiuk.dhfsfs.objects.JKleppmannTreeNodeMetaFile;
import com.usatiuk.objects.JData;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.objects.iterators.IteratorStart;
import com.usatiuk.objects.transaction.LockingStrategy;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import com.usatiuk.utils.StatusRuntimeExceptionNoStacktrace;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -28,6 +34,7 @@ import jakarta.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
@@ -42,20 +49,14 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
TransactionManager jObjectTxManager;
@ConfigProperty(name = "dhfs.files.target_chunk_alignment")
int targetChunkAlignment;
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@ConfigProperty(name = "dhfs.files.write_merge_threshold")
float writeMergeThreshold;
@ConfigProperty(name = "dhfs.files.write_merge_max_chunk_to_take")
float writeMergeMaxChunkToTake;
@ConfigProperty(name = "dhfs.files.write_merge_limit")
float writeMergeLimit;
@ConfigProperty(name = "dhfs.files.write_last_chunk_limit")
float writeLastChunkLimit;
@ConfigProperty(name = "dhfs.files.max_chunk_size", defaultValue = "524288")
int maxChunkSize;
@ConfigProperty(name = "dhfs.files.use_hash_for_chunks")
boolean useHashForChunks;
@@ -75,32 +76,43 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Inject
JMapHelper jMapHelper;
private JKleppmannTreeManager.JKleppmannTree getTree() {
return jKleppmannTreeManager.getTree(new JObjectKey("fs"));
private JKleppmannTreeManager.JKleppmannTree getTreeW() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private JKleppmannTreeManager.JKleppmannTree getTreeR() {
return jKleppmannTreeManager.getTree(JObjectKey.of("fs"), LockingStrategy.OPTIMISTIC, () -> new JKleppmannTreeNodeMetaDirectory(""));
}
private ChunkData createChunk(ByteString bytes) {
var newChunk = new ChunkData(JObjectKey.of(UUID.randomUUID().toString()), bytes);
remoteTx.putData(newChunk);
remoteTx.putDataNew(newChunk);
return newChunk;
}
void init(@Observes @Priority(500) StartupEvent event) {
Log.info("Initializing file service");
getTree();
getTreeW();
}
private JKleppmannTreeNode getDirEntry(String name) {
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
private JKleppmannTreeNode getDirEntryW(String name) {
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNode.class, res).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private JKleppmannTreeNode getDirEntryR(String name) {
var res = getTreeR().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) throw new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("Tree node exists but not found as jObject: " + name)));
return ret;
}
private Optional<JKleppmannTreeNode> getDirEntryOpt(String name) {
var res = getTree().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
var res = getTreeW().traverse(StreamSupport.stream(Path.of(name).spliterator(), false).map(p -> p.toString()).toList());
if (res == null) return Optional.empty();
var ret = curTx.get(JKleppmannTreeNode.class, res);
var ret = curTx.get(JKleppmannTreeNodeHolder.class, res).map(JKleppmannTreeNodeHolder::node);
return ret;
}
@@ -117,7 +129,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
}
} else if (ref instanceof JKleppmannTreeNode) {
} else if (ref instanceof JKleppmannTreeNodeHolder) {
ret = new GetattrRes(100, 100, 0700, GetattrType.DIRECTORY);
} else {
throw new StatusRuntimeException(Status.DATA_LOSS.withDescription("FsNode is not an FsNode: " + ref.key()));
@@ -130,9 +142,9 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public Optional<JObjectKey> open(String name) {
return jObjectTxManager.executeTx(() -> {
try {
var ret = getDirEntry(name);
var ret = getDirEntryR(name);
return switch (ret.meta()) {
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.getFileIno());
case JKleppmannTreeNodeMetaFile f -> Optional.of(f.fileIno());
case JKleppmannTreeNodeMetaDirectory f -> Optional.of(ret.key());
default -> Optional.empty();
};
@@ -154,7 +166,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public Optional<JObjectKey> create(String name, long mode) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
@@ -162,11 +174,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var fuuid = UUID.randomUUID();
Log.debug("Creating file " + fuuid);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false, 0);
File f = new File(JObjectKey.of(fuuid.toString()), mode, System.currentTimeMillis(), System.currentTimeMillis(), false);
remoteTx.putData(f);
try {
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
} catch (Exception e) {
// fobj.getMeta().removeRef(newNodeId);
throw e;
@@ -179,9 +191,9 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Override
public Pair<String, JObjectKey> inoToParent(JObjectKey ino) {
return jObjectTxManager.executeTx(() -> {
return getTree().findParent(w -> {
return getTreeW().findParent(w -> {
if (w.meta() instanceof JKleppmannTreeNodeMetaFile f)
return f.getFileIno().equals(ino);
return f.fileIno().equals(ino);
return false;
});
});
@@ -191,14 +203,14 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public void mkdir(String name, long mode) {
jObjectTxManager.executeTx(() -> {
Path path = Path.of(name);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
String dname = path.getFileName().toString();
Log.debug("Creating directory " + name);
getTree().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTree().getNewNodeId());
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaDirectory(dname), getTreeW().getNewNodeId());
});
}
@@ -206,25 +218,27 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public void unlink(String name) {
jObjectTxManager.executeTx(() -> {
var node = getDirEntryOpt(name).orElse(null);
if (node == null)
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to unlink: " + name));
if (node.meta() instanceof JKleppmannTreeNodeMetaDirectory) {
if (!allowRecursiveDelete && !node.children().isEmpty())
throw new DirectoryNotEmptyException();
}
getTree().trash(node.meta(), node.key());
getTreeW().trash(node.meta(), node.key());
});
}
@Override
public Boolean rename(String from, String to) {
return jObjectTxManager.executeTx(() -> {
var node = getDirEntry(from);
var node = getDirEntryW(from);
JKleppmannTreeNodeMeta meta = node.meta();
var toPath = Path.of(to);
var toDentry = getDirEntry(toPath.getParent().toString());
var toDentry = getDirEntryW(toPath.getParent().toString());
ensureDir(toDentry);
getTree().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
getTreeW().move(toDentry.key(), meta.withName(toPath.getFileName().toString()), node.key());
return true;
});
}
@@ -234,12 +248,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return jObjectTxManager.executeTx(() -> {
var dent = curTx.get(JData.class, uuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
if (dent instanceof JKleppmannTreeNode) {
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, uuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withMode(mode).withMTime(System.currentTimeMillis()));
remoteTx.putData(f.withMode(mode).withCurrentMTime());
return true;
} else {
throw new IllegalArgumentException(uuid + " is not a file");
@@ -253,7 +267,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Override
public Iterable<String> readDir(String name) {
return jObjectTxManager.executeTx(() -> {
var found = getDirEntry(name);
var found = getDirEntryW(name);
if (!(found.meta() instanceof JKleppmannTreeNodeMetaDirectory md))
throw new StatusRuntimeException(Status.INVALID_ARGUMENT);
@@ -263,7 +277,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
@Override
public Optional<ByteString> read(JObjectKey fileUuid, long offset, int length) {
public ByteString read(JObjectKey fileUuid, long offset, int length) {
return jObjectTxManager.executeTx(() -> {
if (length < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Length should be more than zero: " + length));
@@ -273,12 +287,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
var file = remoteTx.getData(File.class, fileUuid).orElse(null);
if (file == null) {
Log.error("File not found when trying to read: " + fileUuid);
return Optional.empty();
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to read: " + fileUuid));
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
if (!it.hasNext())
return Optional.of(ByteString.empty());
return ByteString.empty();
// if (it.peekNextKey().key() != offset) {
// Log.warnv("Read over the end of file: {0} {1} {2}, next chunk: {3}", fileUuid, offset, length, it.peekNextKey());
@@ -316,10 +330,10 @@ public class DhfsFileServiceImpl implements DhfsFileService {
chunk = it.next();
}
return Optional.of(buf);
return buf;
} catch (Exception e) {
Log.error("Error reading file: " + fileUuid, e);
return Optional.empty();
throw new StatusRuntimeException(Status.INTERNAL.withDescription("Error reading file: " + fileUuid));
}
});
}
@@ -339,22 +353,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
return readChunk(uuid).size();
}
private void cleanupChunks(File f, Collection<JObjectKey> uuids) {
// FIXME:
// var inFile = useHashForChunks ? new HashSet<>(f.getChunks().values()) : Collections.emptySet();
// for (var cuuid : uuids) {
// try {
// if (inFile.contains(cuuid)) continue;
// jObjectManager.get(cuuid)
// .ifPresent(jObject -> jObject.runWriteLocked(JObjectManager.ResolutionStrategy.NO_RESOLUTION,
// (m, d, b, v) -> {
// m.removeRef(f.getName());
// return null;
// }));
// } catch (Exception e) {
// Log.error("Error when cleaning chunk " + cuuid, e);
// }
// }
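// alignDown clears the low n bits: -(1L << n) is a mask of ones above bit n,
// so the result is num rounded down to a multiple of 2^n. For example, with
// target_chunk_alignment=19, alignDown(1_000_000, 19) == 524288 (512 KiB).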
private long alignDown(long num, long n) {
return num & -(1L << n);
}
@Override
@@ -363,161 +363,76 @@ public class DhfsFileServiceImpl implements DhfsFileService {
if (offset < 0)
throw new StatusRuntimeException(Status.INVALID_ARGUMENT.withDescription("Offset should be more than zero: " + offset));
// FIXME:
var file = remoteTx.getData(File.class, fileUuid, LockingStrategy.WRITE).orElse(null);
if (file == null) {
Log.error("File not found when trying to write: " + fileUuid);
return -1L;
throw new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to write: " + fileUuid));
}
if (writeLogging) {
Log.info("Writing to file: " + file.key() + " size=" + size(fileUuid) + " "
+ offset + " " + data.size());
}
Map<Long, JObjectKey> removedChunks = new HashMap<>();
if (size(fileUuid) < offset) {
truncate(fileUuid, offset);
file = remoteTx.getData(File.class, fileUuid).orElse(null);
}
long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
long writeEnd = offset + data.size();
long start = realOffset;
long existingEnd = 0;
ByteString pendingPrefix = ByteString.empty();
ByteString pendingSuffix = ByteString.empty();
Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
Log.tracev("Getting last");
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.of(offset + data.size()))) {
last = it.hasNext() ? it.next() : null;
Log.tracev("Last: {0}", last);
}
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(realOffset))) {
while (it.hasNext()) {
var curEntry = it.next();
long curChunkStart = curEntry.getKey().key();
var curChunkId = curEntry.getValue().ref();
long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
existingEnd = curChunkEnd;
if (curChunkEnd <= realOffset) break;
NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
removedChunks.put(curEntry.getKey().key(), curChunkId);
long start = 0;
if (curChunkStart < offset) {
if (curChunkStart < start)
start = curChunkStart;
try (var it = jMapHelper.getIterator(file, IteratorStart.LE, JMapLongKey.of(offset))) {
first = it.hasNext() ? it.next() : null;
Log.tracev("First: {0}", first);
boolean empty = last == null;
if (first != null && getChunkSize(first.getValue().ref()) + first.getKey().key() <= offset) {
first = null;
last = null;
start = offset;
} else if (!empty) {
assert first != null;
removedChunks.put(first.getKey().key(), first.getValue().ref());
while (it.hasNext() && it.peekNextKey().compareTo(last.getKey()) <= 0) {
var next = it.next();
Log.tracev("Next: {0}", next);
removedChunks.put(next.getKey().key(), next.getValue().ref());
var readChunk = readChunk(curChunkId);
pendingPrefix = pendingPrefix.concat(readChunk.substring(0, Math.min(readChunk.size(), (int) (offset - curChunkStart))));
}
removedChunks.put(last.getKey().key(), last.getValue().ref());
start = first.getKey().key();
if (curChunkEnd > writeEnd) {
var readChunk = readChunk(curChunkId);
pendingSuffix = pendingSuffix.concat(readChunk.substring((int) (writeEnd - curChunkStart), readChunk.size()));
}
if (curChunkEnd >= writeEnd) break;
}
}
// NavigableMap<Long, JObjectKey> beforeFirst = first != null ? chunksAll.headMap(first.getKey(), false) : Collections.emptyNavigableMap();
// NavigableMap<Long, JObjectKey> afterLast = last != null ? chunksAll.tailMap(last.getKey(), false) : Collections.emptyNavigableMap();
Map<Long, JObjectKey> newChunks = new HashMap<>();
// if (first != null && (getChunkSize(first.getValue()) + first.getKey() <= offset)) {
// beforeFirst = chunksAll;
// afterLast = Collections.emptyNavigableMap();
// first = null;
// last = null;
// start = offset;
// } else if (!chunksAll.isEmpty()) {
// var between = chunksAll.subMap(first.getKey(), true, last.getKey(), true);
// removedChunks.putAll(between);
// start = first.getKey();
// }
ByteString pendingWrites = ByteString.empty();
if (first != null && first.getKey().key() < offset) {
var chunkBytes = readChunk(first.getValue().ref());
pendingWrites = pendingWrites.concat(chunkBytes.substring(0, (int) (offset - first.getKey().key())));
}
pendingWrites = pendingWrites.concat(data);
if (last != null) {
var lchunkBytes = readChunk(last.getValue().ref());
if (last.getKey().key() + lchunkBytes.size() > offset + data.size()) {
var startInFile = offset + data.size();
var startInChunk = startInFile - last.getKey().key();
pendingWrites = pendingWrites.concat(lchunkBytes.substring((int) startInChunk, lchunkBytes.size()));
if (existingEnd < offset) {
if (!pendingPrefix.isEmpty()) {
int diff = Math.toIntExact(offset - existingEnd);
pendingPrefix = pendingPrefix.concat(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(diff)));
} else {
fillZeros(existingEnd, offset, newChunks);
start = offset;
}
}
ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
int combinedSize = pendingWrites.size();
if (targetChunkSize > 0) {
// if (combinedSize < (targetChunkSize * writeMergeThreshold)) {
// boolean leftDone = false;
// boolean rightDone = false;
// while (!leftDone && !rightDone) {
// if (beforeFirst.isEmpty()) leftDone = true;
// if (!beforeFirst.isEmpty() || !leftDone) {
// var takeLeft = beforeFirst.lastEntry();
//
// var cuuid = takeLeft.getValue();
//
// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
// leftDone = true;
// continue;
// }
//
// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
// leftDone = true;
// continue;
// }
//
// // FIXME: (and test this)
// beforeFirst = beforeFirst.headMap(takeLeft.getKey(), false);
// start = takeLeft.getKey();
// pendingWrites = readChunk(cuuid).concat(pendingWrites);
// combinedSize += getChunkSize(cuuid);
// removedChunks.put(takeLeft.getKey(), takeLeft.getValue());
// }
// if (afterLast.isEmpty()) rightDone = true;
// if (!afterLast.isEmpty() && !rightDone) {
// var takeRight = afterLast.firstEntry();
//
// var cuuid = takeRight.getValue();
//
// if (getChunkSize(cuuid) >= (targetChunkSize * writeMergeMaxChunkToTake)) {
// rightDone = true;
// continue;
// }
//
// if ((combinedSize + getChunkSize(cuuid)) > (targetChunkSize * writeMergeLimit)) {
// rightDone = true;
// continue;
// }
//
// // FIXME: (and test this)
// afterLast = afterLast.tailMap(takeRight.getKey(), false);
// pendingWrites = pendingWrites.concat(readChunk(cuuid));
// combinedSize += getChunkSize(cuuid);
// removedChunks.put(takeRight.getKey(), takeRight.getValue());
// }
// }
// }
}
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
{
int cur = 0;
while (cur < combinedSize) {
int end;
if (targetChunkSize <= 0)
if (combinedSize - cur < maxChunkSize)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * writeLastChunkLimit)) {
end = Math.min(cur + targetChunkSize, combinedSize);
} else {
end = combinedSize;
}
}
else if (targetChunkAlignment < 0)
end = combinedSize;
else
end = Math.min(cur + targetChunkSize, combinedSize);
var thisChunk = pendingWrites.substring(cur, end);
@@ -530,18 +445,16 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
cleanupChunks(file, removedChunks.values());
updateFileSize(file);
remoteTx.putData(file.withCurrentMTime());
return (long) data.size();
});
@@ -560,18 +473,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
}
if (length == 0) {
try (var it = jMapHelper.getIterator(file, IteratorStart.GE, JMapLongKey.of(0))) {
while (it.hasNext()) {
var next = it.next();
jMapHelper.delete(file, next.getKey());
}
}
// var oldChunks = file.chunks();
//
// file = file.withChunks(TreePMap.empty()).withMTime(System.currentTimeMillis());
jMapHelper.deleteAll(file);
remoteTx.putData(file);
// cleanupChunks(file, oldChunks.values());
updateFileSize(file);
return true;
}
@@ -582,38 +485,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
if (curSize < length) {
long combinedSize = (length - curSize);
long start = curSize;
// Hack
HashMap<Long, ByteString> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)]));
ChunkData newChunkData = createChunk(zeroCache.get(end - cur));
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
fillZeros(curSize, length, newChunks);
} else {
// Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
@@ -664,22 +536,55 @@ public class DhfsFileServiceImpl implements DhfsFileService {
// file = file.withChunks(file.chunks().minusAll(removedChunks.keySet()).plusAll(newChunks)).withMTime(System.currentTimeMillis());
for (var e : removedChunks.entrySet()) {
Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Removing chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.delete(file, JMapLongKey.of(e.getKey()));
}
for (var e : newChunks.entrySet()) {
Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
// Log.tracev("Adding chunk {0}-{1}", e.getKey(), e.getValue());
jMapHelper.put(file, JMapLongKey.of(e.getKey()), e.getValue());
}
remoteTx.putData(file);
cleanupChunks(file, removedChunks.values());
updateFileSize(file);
remoteTx.putData(file.withCurrentMTime());
return true;
});
}
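// fillZeros covers [fillStart, fillEnd) with zero-filled chunks, cut to
// targetChunkSize, reusing one ChunkData per distinct length within the call
// (zeroCache), so truncate-extension and sparse writes share zero chunks.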
private void fillZeros(long fillStart, long fillEnd, Map<Long, JObjectKey> newChunks) {
long combinedSize = (fillEnd - fillStart);
long start = fillStart;
// Hack
HashMap<Long, ChunkData> zeroCache = new HashMap<>();
{
long cur = 0;
while (cur < combinedSize) {
long end;
if (targetChunkSize <= 0)
end = combinedSize;
else {
if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
end = cur + targetChunkSize;
} else {
end = combinedSize;
}
}
if (!zeroCache.containsKey(end - cur))
zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(ByteBuffer.allocateDirect(Math.toIntExact(end - cur)))));
ChunkData newChunkData = zeroCache.get(end - cur);
newChunks.put(start, newChunkData.key());
start += newChunkData.data().size();
cur = end;
}
}
}
@Override
public String readlink(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
@@ -691,7 +596,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public ByteString readlinkBS(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var fileOpt = remoteTx.getData(File.class, uuid).orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription("File not found when trying to readlink: " + uuid)));
return read(uuid, 0, Math.toIntExact(size(uuid))).get();
return read(uuid, 0, Math.toIntExact(size(uuid)));
});
}
@@ -699,7 +604,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
public JObjectKey symlink(String oldpath, String newpath) {
return jObjectTxManager.executeTx(() -> {
Path path = Path.of(newpath);
var parent = getDirEntry(path.getParent().toString());
var parent = getDirEntryW(path.getParent().toString());
ensureDir(parent);
@@ -709,12 +614,11 @@ public class DhfsFileServiceImpl implements DhfsFileService {
Log.debug("Creating file " + fuuid);
ChunkData newChunkData = createChunk(UnsafeByteOperations.unsafeWrap(oldpath.getBytes(StandardCharsets.UTF_8)));
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true, 0);
File f = new File(JObjectKey.of(fuuid.toString()), 0, System.currentTimeMillis(), System.currentTimeMillis(), true);
jMapHelper.put(f, JMapLongKey.of(0), newChunkData.key());
updateFileSize(f);
getTree().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTree().getNewNodeId());
remoteTx.putData(f);
getTreeW().move(parent.key(), new JKleppmannTreeNodeMetaFile(fname, f.key()), getTreeW().getNewNodeId());
return f.key();
});
}
@@ -722,23 +626,33 @@ public class DhfsFileServiceImpl implements DhfsFileService {
@Override
public Boolean setTimes(JObjectKey fileUuid, long atimeMs, long mtimeMs) {
return jObjectTxManager.executeTx(() -> {
var file = remoteTx.getData(File.class, fileUuid).orElseThrow(
() -> new StatusRuntimeException(Status.NOT_FOUND.withDescription(
"File not found for setTimes: " + fileUuid))
);
var dent = curTx.get(JData.class, fileUuid).orElseThrow(() -> new StatusRuntimeExceptionNoStacktrace(Status.NOT_FOUND));
remoteTx.putData(file.withCTime(atimeMs).withMTime(mtimeMs));
return true;
// FIXME:
if (dent instanceof JKleppmannTreeNodeHolder) {
return true;
} else if (dent instanceof RemoteObjectMeta) {
var remote = remoteTx.getData(JDataRemote.class, fileUuid).orElse(null);
if (remote instanceof File f) {
remoteTx.putData(f.withCTime(atimeMs).withMTime(mtimeMs));
return true;
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
} else {
throw new IllegalArgumentException(fileUuid + " is not a file");
}
});
}
@Override
public void updateFileSize(File file) {
jObjectTxManager.executeTx(() -> {
public long size(JObjectKey fileUuid) {
return jObjectTxManager.executeTx(() -> {
long realSize = 0;
var file = remoteTx.getData(File.class, fileUuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
Log.tracev("Getting last");
try (var it = jMapHelper.getIterator(file, IteratorStart.LT, JMapLongKey.max())) {
last = it.hasNext() ? it.next() : null;
}
@@ -747,19 +661,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
realSize = last.getKey().key() + getChunkSize(last.getValue().ref());
}
if (realSize != file.size()) {
remoteTx.putData(file.withSize(realSize));
}
});
}
@Override
public Long size(JObjectKey uuid) {
return jObjectTxManager.executeTx(() -> {
var read = remoteTx.getData(File.class, uuid)
.orElseThrow(() -> new StatusRuntimeException(Status.NOT_FOUND));
return read.size();
return realSize;
});
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
public class DirectoryNotEmptyException extends RuntimeException {
@Override

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
public record GetattrRes(long mtime, long ctime, long mode, GetattrType type) {
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files.service;
package com.usatiuk.dhfsfs.service;
public enum GetattrType {
FILE,

View File

@@ -1,10 +1,10 @@
quarkus.grpc.server.use-separate-server=false
dhfs.objects.peerdiscovery.port=42069
dhfs.objects.peerdiscovery.interval=5s
dhfs.objects.peerdiscovery.interval=4s
dhfs.objects.peerdiscovery.broadcast=true
dhfs.objects.sync.timeout=30
dhfs.objects.sync.ping.timeout=5
dhfs.objects.invalidation.threads=1
dhfs.objects.invalidation.threads=16
dhfs.objects.invalidation.delay=1000
dhfs.objects.reconnect_interval=5s
dhfs.objects.write_log=false
@@ -15,26 +15,20 @@ dhfs.fuse.debug=false
dhfs.fuse.enabled=true
dhfs.files.allow_recursive_delete=false
dhfs.files.target_chunk_size=2097152
# Writes strictly smaller than this will try to merge with blocks nearby
dhfs.files.write_merge_threshold=0.8
# If a merge would result in a block of greater size than this, stop merging
dhfs.files.write_merge_limit=1.2
# Don't take blocks of this size and above when merging
dhfs.files.write_merge_max_chunk_to_take=1
dhfs.files.write_last_chunk_limit=1.5
dhfs.files.target_chunk_alignment=19
dhfs.objects.deletion.delay=1000
dhfs.objects.deletion.can-delete-retry-delay=10000
dhfs.objects.ref_verification=true
dhfs.files.use_hash_for_chunks=false
dhfs.objects.autosync.threads=2
dhfs.objects.autosync.threads=16
dhfs.objects.autosync.download-all=false
dhfs.objects.move-processor.threads=4
dhfs.objects.ref-processor.threads=4
dhfs.objects.move-processor.threads=16
dhfs.objects.ref-processor.threads=16
dhfs.objects.opsender.batch-size=100
dhfs.objects.lock_timeout_secs=2
dhfs.local-discovery=true
dhfs.peerdiscovery.timeout=5000
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
dhfs.peerdiscovery.timeout=10000
quarkus.log.category."com.usatiuk".min-level=TRACE
quarkus.log.category."com.usatiuk".level=TRACE
quarkus.http.insecure-requests=enabled
quarkus.http.ssl.client-auth=required
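As a rough illustration of the new target_chunk_alignment setting (a minimal sketch, assuming the value is a power-of-two exponent for chunk boundaries, so 19 would mean 2^19 = 512 KiB; -1 disables alignment as in the tests below):
// hypothetical helper, not the project's actual API
static long alignDown(long offset, int alignmentExp) {
    if (alignmentExp < 0) return offset;   // -1: alignment disabled
    long mask = (1L << alignmentExp) - 1;
    return offset & ~mask;                 // alignDown(600000, 19) == 524288
}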

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,12 +1,11 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.objects.File;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.objects.RemoteTransaction;
import com.usatiuk.dhfs.objects.TransactionManager;
import com.usatiuk.dhfs.objects.transaction.Transaction;
import com.usatiuk.dhfs.remoteobj.RemoteTransaction;
import com.usatiuk.dhfsfs.objects.File;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.transaction.Transaction;
import com.usatiuk.objects.transaction.TransactionManager;
import jakarta.inject.Inject;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.RepeatedTest;
@@ -27,6 +26,7 @@ class Profiles {
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "-1");
ret.put("dhfs.files.target_chunk_alignment", "-1");
}
}
@@ -35,11 +35,12 @@ class Profiles {
protected void getConfigOverrides(Map<String, String> ret) {
ret.put("dhfs.fuse.enabled", "false");
ret.put("dhfs.files.target_chunk_size", "3");
ret.put("dhfs.files.target_chunk_alignment", "2");
}
}
}
public class DhfsFileServiceSimpleTestImpl {
public abstract class DhfsFileServiceSimpleTestImpl {
@Inject
DhfsFileService fileService;
@Inject
@@ -88,7 +89,7 @@ public class DhfsFileServiceSimpleTestImpl {
// for (int start = 0; start < all.length(); start++) {
// for (int end = start; end <= all.length(); end++) {
// var read = fileService.read(fuuid.toString(), start, end - start);
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.get().toByteArray());
// Assertions.assertArrayEquals(all.substring(start, end).getBytes(), read.toByteArray());
// }
// }
// }
@@ -109,17 +110,21 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
var curMtime = fileService.getattr(uuid).get().mtime();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 2, 8).toByteArray());
fileService.write(uuid, 4, new byte[]{10, 11, 12});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid, 10, new byte[]{13, 14});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 12, 7, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
fileService.write(uuid, 6, new byte[]{15, 16});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 10, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
fileService.write(uuid, 3, new byte[]{17, 18});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 17, 18, 11, 15, 16, 8, 9, 13, 14}, fileService.read(uuid, 0, 12).toByteArray());
var newMtime = fileService.getattr(uuid).get().mtime();
Assertions.assertTrue(newMtime > curMtime);
fileService.unlink("/writeTest");
Assertions.assertFalse(fileService.open("/writeTest").isPresent());
@@ -133,7 +138,7 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.unlink("/removeTest");
Assertions.assertFalse(fileService.open("/removeTest").isPresent());
@@ -147,11 +152,12 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
fileService.write(uuid, 5, new byte[]{10, 11, 12, 13, 14, 15, 16, 17});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
}
@RepeatedTest(100)
@@ -163,11 +169,12 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 20);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).toByteArray());
fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).toByteArray());
} finally {
fileService.unlink("/truncateTest2");
}
@@ -181,10 +188,10 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.truncate(uuid, 7);
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6,}, fileService.read(uuid, 0, 20).toByteArray());
}
@Test
@@ -194,14 +201,14 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertTrue(fileService.rename("/moveTest", "/movedTest"));
Assertions.assertFalse(fileService.open("/moveTest").isPresent());
Assertions.assertTrue(fileService.open("/movedTest").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/movedTest").get(), 0, 10).toByteArray());
}
@Test
@@ -214,9 +221,9 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid2 = ret2.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid2, 0, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29});
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 29}, fileService.read(uuid2, 0, 10).toByteArray());
jObjectTxManager.run(() -> {
@@ -230,7 +237,7 @@ public class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/moveOverTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/moveOverTest2").get(), 0, 10).toByteArray());
// await().atMost(5, TimeUnit.SECONDS).until(() -> {
// jObjectTxManager.run(() -> {
@@ -248,8 +255,8 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
Assertions.assertArrayEquals(new byte[]{}, fileService.read(uuid, 20, 10).toByteArray());
}
@Test
@@ -259,13 +266,13 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
fileService.write(uuid, 20, new byte[]{10, 11, 12, 13, 14, 15, 16, 17, 18, 19});
Assertions.assertArrayEquals(new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
}, fileService.read(uuid, 0, 30).get().toByteArray());
}, fileService.read(uuid, 0, 30).toByteArray());
}
@Test
@@ -275,7 +282,7 @@ public class DhfsFileServiceSimpleTestImpl {
var uuid = ret.get();
fileService.write(uuid, 0, new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).toByteArray());
// var oldfile = jObjectManager.get(uuid).orElseThrow(IllegalStateException::new);
// var chunk = oldfile.runReadLocked(JObjectManager.ResolutionStrategy.LOCAL_ONLY, (m, d) -> d.extractRefs()).stream().toList().get(0);
@@ -290,6 +297,6 @@ public class DhfsFileServiceSimpleTestImpl {
Assertions.assertTrue(fileService.open("/movedTest2").isPresent());
Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
fileService.read(fileService.open("/movedTest2").get(), 0, 10).get().toByteArray());
fileService.read(fileService.open("/movedTest2").get(), 0, 10).toByteArray());
}
}

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.files;
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;

View File

@@ -0,0 +1,29 @@
package com.usatiuk.dhfsfs;
import io.quarkus.test.junit.QuarkusTestProfile;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
abstract public class TempDataProfile implements QuarkusTestProfile {
protected void getConfigOverrides(Map<String, String> toPut) {
}
@Override
final public Map<String, String> getConfigOverrides() {
Path tempDirWithPrefix;
try {
tempDirWithPrefix = Files.createTempDirectory("dhfs-test");
} catch (IOException e) {
throw new RuntimeException(e);
}
var ret = new HashMap<String, String>();
ret.put("dhfs.objects.persistence.files.root", tempDirWithPrefix.resolve("dhfs_root_test").toString());
ret.put("dhfs.fuse.root", tempDirWithPrefix.resolve("dhfs_fuse_root_test").toString());
getConfigOverrides(ret);
return ret;
}
}
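A concrete profile extends it the same way the test profiles above do (a hypothetical subclass mirroring that pattern):
class SmallChunksProfile extends TempDataProfile {
    @Override
    protected void getConfigOverrides(Map<String, String> ret) {
        ret.put("dhfs.files.target_chunk_size", "3");      // values as in the tests above
        ret.put("dhfs.files.target_chunk_alignment", "2");
    }
}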

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.objects;
package com.usatiuk.dhfsfs;
import io.quarkus.logging.Log;
import io.quarkus.runtime.ShutdownEvent;

View File

@@ -1,4 +1,4 @@
package com.usatiuk.dhfs.benchmarks;
package com.usatiuk.dhfsfs.benchmarks;
import io.quarkus.logging.Log;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

View File

@@ -1,9 +1,9 @@
package com.usatiuk.dhfs.benchmarks;
package com.usatiuk.dhfsfs.benchmarks;
import com.google.protobuf.UnsafeByteOperations;
import com.usatiuk.dhfs.TempDataProfile;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.objects.JObjectKey;
import com.usatiuk.dhfsfs.TempDataProfile;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.objects.JObjectKey;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.junit.TestProfile;
import jakarta.inject.Inject;

View File

@@ -5,7 +5,7 @@ dhfs.objects.ref_verification=true
dhfs.objects.deletion.delay=0
quarkus.log.category."com.usatiuk.dhfs".level=TRACE
quarkus.log.category."com.usatiuk.dhfs".min-level=TRACE
quarkus.class-loading.parent-first-artifacts=com.usatiuk.dhfs:supportlib
quarkus.http.test-port=0
quarkus.http.test-ssl-port=0
dhfs.local-discovery=false
dhfs.objects.persistence.snapshot-extra-checks=true

View File

@@ -0,0 +1,5 @@
*
!target/*-runner
!target/*-runner.jar
!target/lib/*
!target/quarkus-app/*
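(The leading * excludes everything from the build context; the ! entries then re-include only the packaged runner and quarkus-app outputs.)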

dhfs-parent/dhfs-fuse/.gitignore
View File

@@ -0,0 +1,43 @@
#Maven
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
release.properties
.flattened-pom.xml
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
# Plugin directory
/.quarkus/cli/plugins/

View File

@@ -0,0 +1,2 @@
FROM azul/zulu-openjdk-debian:21-jre-latest
RUN apt update && apt install -y libfuse2 curl

View File

@@ -0,0 +1,43 @@
version: "3.2"
services:
dhfs1:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs1:/dhfs_root
- $HOME/dhfs/dhfs1_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-jar /app/quarkus-run.jar"
ports:
- 8080:8080
- 8081:8443
- 5005:5005
dhfs2:
build: .
privileged: true
devices:
- /dev/fuse
volumes:
- $HOME/dhfs/dhfs2:/dhfs_root
- $HOME/dhfs/dhfs2_f:/dhfs_root/fuse:rshared
- ./target/quarkus-app:/app
command: "java --add-exports java.base/sun.nio.ch=ALL-UNNAMED
--add-exports java.base/jdk.internal.access=ALL-UNNAMED
--add-opens=java.base/java.nio=ALL-UNNAMED
-Ddhfs.objects.persistence.files.root=/dhfs_root/p
-Ddhfs.objects.root=/dhfs_root/d
-Ddhfs.fuse.root=/dhfs_root/fuse -Dquarkus.http.host=0.0.0.0
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5010
-jar /app/quarkus-run.jar"
ports:
- 8090:8080
- 8091:8443
- 5010:5010
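To try this compose setup locally, one would package the app first and then start both nodes (a sketch; assumes Docker Compose v2):
./mvnw package
docker compose up --build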

View File

@@ -3,8 +3,8 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>server</artifactId>
<version>1.0.0-SNAPSHOT</version>
<artifactId>dhfs-fuse</artifactId>
<version>1.0-SNAPSHOT</version>
<parent>
<groupId>com.usatiuk.dhfs</groupId>
@@ -23,26 +23,13 @@
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>autoprotomap-deployment</artifactId>
<version>1.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk18on</artifactId>
<version>1.78.1</version>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk18on</artifactId>
<version>1.78.1</version>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
@@ -86,29 +73,9 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.github.SerCeMan</groupId>
<groupId>com.github.serceman</groupId>
<artifactId>jnr-fuse</artifactId>
<version>44ed40f8ce</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-ffi</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-posix</artifactId>
<version>3.1.19</version>
</dependency>
<dependency>
<groupId>com.github.jnr</groupId>
<artifactId>jnr-constants</artifactId>
<version>0.10.4</version>
<version>0.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
@@ -131,24 +98,18 @@
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
</dependency>
<dependency>
<groupId>org.pcollections</groupId>
<artifactId>pcollections</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>com.usatiuk</groupId>
<artifactId>kleppmanntree</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>supportlib</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.usatiuk.dhfs</groupId>
<artifactId>objects</artifactId>
<artifactId>dhfs-fs</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
@@ -167,6 +128,11 @@
<forkCount>1C</forkCount>
<reuseForks>false</reuseForks>
<parallel>classes</parallel>
<systemPropertyVariables>
<junit.jupiter.execution.parallel.enabled>
false
</junit.jupiter.execution.parallel.enabled>
</systemPropertyVariables>
</configuration>
</plugin>
<plugin>

View File

@@ -0,0 +1,97 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/server-jvm .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-jvm
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a containers restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
# We make four distinct layers so if there are application changes the library layers can be re-used
COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/
COPY --chown=185 target/quarkus-app/*.jar /deployments/
COPY --chown=185 target/quarkus-app/app/ /deployments/app/
COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -0,0 +1,93 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
#
# Before building the container image run:
#
# ./mvnw package -Dquarkus.package.jar.type=legacy-jar
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/server-legacy-jar .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# If you want to include the debug port into your docker image
# you will have to expose the debug port (5005 being the default) like this: EXPOSE 8080 5005.
# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005
# when running the container
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server-legacy-jar
#
# This image uses the `run-java.sh` script to run the application.
# This script computes the command line to execute your Java application, and
# includes memory/GC tuning.
# You can configure the behavior using the following environment properties:
# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class")
# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options
# in JAVA_OPTS (example: "-Dsome.property=foo")
# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is
# used to calculate a default maximal heap memory based on a containers restriction.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio
# of the container available memory as set here. The default is `50` which means 50%
# of the available memory is used as an upper boundary. You can skip this mechanism by
# setting this value to `0` in which case no `-Xmx` option is added.
# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This
# is used to calculate a default initial heap memory based on the maximum heap memory.
# If used in a container without any memory constraints for the container then this
# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio
# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx`
# is used as the initial heap size. You can skip this mechanism by setting this value
# to `0` in which case no `-Xms` option is added (example: "25")
# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS.
# This is used to calculate the maximum value of the initial heap memory. If used in
# a container without any memory constraints for the container then this option has
# no effect. If there is a memory constraint then `-Xms` is limited to the value set
# here. The default is 4096MB which means the calculated value of `-Xms` never will
# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096")
# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output
# when things are happening. This option, if set to true, will set
# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true").
# - JAVA_DEBUG: If set, remote debugging will be switched on. Disabled by default (example:
# "true").
# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787").
# - CONTAINER_CORE_LIMIT: A calculated core limit as described in
# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2")
# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024").
# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion.
# (example: "20")
# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking.
# (example: "40")
# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection.
# (example: "4")
# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus
# previous GC times. (example: "90")
# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20")
# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100")
# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should
# contain the necessary JRE command-line options to specify the required GC, which
# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC).
# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080")
# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080")
# - NO_PROXY: A comma-separated list of hosts, IP addresses or domains that can be
# accessed directly. (example: "foo.example.com,bar.example.com")
#
###
FROM registry.access.redhat.com/ubi8/openjdk-21:1.18
ENV LANGUAGE='en_US:en'
COPY target/lib/* /deployments/lib/
COPY target/*-runner.jar /deployments/quarkus-run.jar
EXPOSE 8080
USER 185
ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
ENV JAVA_APP_JAR="/deployments/quarkus-run.jar"
ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ]

View File

@@ -0,0 +1,27 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -0,0 +1,30 @@
####
# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode.
# It uses a micro base image, tuned for Quarkus native executables.
# It reduces the size of the resulting container image.
# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image.
#
# Before building the container image run:
#
# ./mvnw package -Dnative
#
# Then, build the image with:
#
# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/server .
#
# Then run the container using:
#
# docker run -i --rm -p 8080:8080 quarkus/server
#
###
FROM quay.io/quarkus/quarkus-micro-image:2.0
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
COPY --chown=1001:root target/*-runner /work/application
EXPOSE 8080
USER 1001
ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]

View File

@@ -1,12 +1,15 @@
package com.usatiuk.dhfs.fuse;
package com.usatiuk.dhfsfuse;
import com.google.protobuf.UnsafeByteOperations;
import com.kenai.jffi.MemoryIO;
import com.sun.security.auth.module.UnixSystem;
import com.usatiuk.dhfs.files.service.DhfsFileService;
import com.usatiuk.dhfs.files.service.DirectoryNotEmptyException;
import com.usatiuk.dhfs.files.service.GetattrRes;
import com.usatiuk.dhfs.supportlib.UninitializedByteBuffer;
import com.usatiuk.dhfsfs.service.DhfsFileService;
import com.usatiuk.dhfsfs.service.DirectoryNotEmptyException;
import com.usatiuk.dhfsfs.service.GetattrRes;
import com.usatiuk.kleppmanntree.AlreadyExistsException;
import com.usatiuk.objects.JObjectKey;
import com.usatiuk.utils.UninitializedByteBuffer;
import com.usatiuk.utils.UnsafeAccessor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.quarkus.logging.Log;
@@ -17,19 +20,25 @@ import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.event.Observes;
import jakarta.inject.Inject;
import jnr.ffi.Pointer;
import jnr.ffi.Runtime;
import jnr.ffi.Struct;
import jnr.ffi.types.off_t;
import jnr.ffi.types.size_t;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import ru.serce.jnrfuse.ErrorCodes;
import ru.serce.jnrfuse.FuseFillDir;
import ru.serce.jnrfuse.FuseStubFS;
import ru.serce.jnrfuse.struct.FileStat;
import ru.serce.jnrfuse.struct.FuseFileInfo;
import ru.serce.jnrfuse.struct.Statvfs;
import ru.serce.jnrfuse.struct.Timespec;
import ru.serce.jnrfuse.NotImplemented;
import ru.serce.jnrfuse.flags.FuseBufFlags;
import ru.serce.jnrfuse.struct.*;
import java.nio.ByteBuffer;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import static jnr.posix.FileStat.*;
@@ -37,6 +46,8 @@ import static jnr.posix.FileStat.*;
public class DhfsFuse extends FuseStubFS {
private static final int blksize = 1048576;
private static final int iosize = 1048576;
private final ConcurrentHashMap<Long, JObjectKey> _openHandles = new ConcurrentHashMap<>();
private final AtomicLong _fh = new AtomicLong(1);
@ConfigProperty(name = "dhfs.fuse.root")
String root;
@ConfigProperty(name = "dhfs.fuse.enabled")
@@ -46,42 +57,67 @@ public class DhfsFuse extends FuseStubFS {
@ConfigProperty(name = "dhfs.files.target_chunk_size")
int targetChunkSize;
@Inject
JnrPtrByteOutputAccessors jnrPtrByteOutputAccessors;
@Inject
DhfsFileService fileService;
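// Allocates a unique, non-zero file handle for an opened object and maps it
// to the object's key; 0 is skipped so it can mean "no handle".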
private long allocateHandle(JObjectKey key) {
while (true) {
var newFh = _fh.getAndIncrement();
if (newFh == 0) continue;
if (_openHandles.putIfAbsent(newFh, key) == null) {
return newFh;
}
}
}
private JObjectKey getFromHandle(long handle) {
assert handle != 0;
return _openHandles.get(handle);
}
void init(@Observes @Priority(100000) StartupEvent event) {
if (!enabled) return;
Paths.get(root).toFile().mkdirs();
Log.info("Mounting with root " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
var opts = new ArrayList<String>();
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
if (SystemUtils.IS_OS_WINDOWS) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=-1");
opts.add("-o");
opts.add("gid=-1");
} else {
Paths.get(root).toFile().mkdirs();
if (!Paths.get(root).toFile().isDirectory())
throw new IllegalStateException("Could not create directory " + root);
var uid = new UnixSystem().getUid();
var gid = new UnixSystem().getGid();
// Assuming macFuse
if (SystemUtils.IS_OS_MAC) {
opts.add("-o");
opts.add("iosize=" + iosize);
} else if (SystemUtils.IS_OS_LINUX) {
// FIXME: There's something else missing: the writes still seem to be 32k max
// opts.add("-o");
// opts.add("large_read");
opts.add("-o");
opts.add("big_writes");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("-o");
opts.add("max_write=" + iosize);
}
opts.add("-o");
opts.add("big_writes");
opts.add("auto_cache");
opts.add("-o");
opts.add("max_read=" + iosize);
opts.add("uid=" + uid);
opts.add("-o");
opts.add("max_write=" + iosize);
opts.add("gid=" + gid);
}
opts.add("-o");
opts.add("auto_cache");
opts.add("-o");
opts.add("uid=" + uid);
opts.add("-o");
opts.add("gid=" + gid);
mount(Paths.get(root), false, debug, opts.toArray(String[]::new));
}
@@ -174,7 +210,9 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int open(String path, FuseFileInfo fi) {
try {
if (fileService.open(path).isEmpty()) return -ErrorCodes.ENOENT();
var opened = fileService.open(path);
if (opened.isEmpty()) return -ErrorCodes.ENOENT();
fi.fh.set(allocateHandle(opened.get()));
return 0;
} catch (Throwable e) {
Log.error("When open " + path, e);
@@ -182,18 +220,23 @@ public class DhfsFuse extends FuseStubFS {
}
}
@Override
public int release(String path, FuseFileInfo fi) {
assert fi.fh.get() != 0;
_openHandles.remove(fi.fh.get());
return 0;
}
@Override
public int read(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
if (size < 0) return -ErrorCodes.EINVAL();
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileOpt = fileService.open(path);
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var file = fileOpt.get();
var read = fileService.read(fileOpt.get(), offset, (int) size);
var fileKey = getFromHandle(fi.fh.get());
var read = fileService.read(fileKey, offset, (int) size);
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read.get(), new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
return read.get().size();
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
return read.size();
} catch (Throwable e) {
Log.error("When reading " + path, e);
return -ErrorCodes.EIO();
@@ -202,25 +245,22 @@ public class DhfsFuse extends FuseStubFS {
@Override
public int write(String path, Pointer buf, long size, long offset, FuseFileInfo fi) {
var buffer = UninitializedByteBuffer.allocate((int) size);
UnsafeAccessor.UNSAFE.copyMemory(
buf.address(),
UnsafeAccessor.NIO.getBufferAddress(buffer),
size
);
return write(path, buffer, offset, fi);
}
public int write(String path, ByteBuffer buffer, long offset, FuseFileInfo fi) {
if (offset < 0) return -ErrorCodes.EINVAL();
try {
var fileOpt = fileService.open(path);
if (fileOpt.isEmpty()) return -ErrorCodes.ENOENT();
var buffer = UninitializedByteBuffer.allocateUninitialized((int) size);
if (buffer.isDirect()) {
jnrPtrByteOutputAccessors.getUnsafe().copyMemory(
buf.address(),
jnrPtrByteOutputAccessors.getNioAccess().getBufferAddress(buffer),
size
);
} else {
buf.get(0, buffer.array(), 0, (int) size);
}
var written = fileService.write(fileOpt.get(), offset, UnsafeByteOperations.unsafeWrap(buffer));
var fileKey = getFromHandle(fi.fh.get());
var written = fileService.write(fileKey, offset, UnsafeByteOperations.unsafeWrap(buffer));
return written.intValue();
} catch (Throwable e) {
} catch (Exception e) {
Log.error("When writing " + path, e);
return -ErrorCodes.EIO();
}
@@ -231,7 +271,8 @@ public class DhfsFuse extends FuseStubFS {
try {
var ret = fileService.create(path, mode);
if (ret.isEmpty()) return -ErrorCodes.ENOSPC();
else return 0;
fi.fh.set(allocateHandle(ret.get()));
return 0;
} catch (Throwable e) {
Log.error("When creating " + path, e);
return -ErrorCodes.EIO();
@@ -355,7 +396,7 @@ public class DhfsFuse extends FuseStubFS {
var file = fileOpt.get();
var read = fileService.readlinkBS(fileOpt.get());
if (read.isEmpty()) return 0;
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(jnrPtrByteOutputAccessors, buf, size));
UnsafeByteOperations.unsafeWriteTo(read, new JnrPtrByteOutput(buf, size));
buf.putByte(Math.min(size - 1, read.size()), (byte) 0);
return 0;
} catch (Throwable e) {
@@ -387,4 +428,29 @@ public class DhfsFuse extends FuseStubFS {
return -ErrorCodes.EIO();
}
}
@Override
public int write_buf(String path, FuseBufvec buf, @off_t long off, FuseFileInfo fi) {
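// Gathers the (possibly scattered) FUSE buffer vector into one freshly
// allocated direct buffer via fuse_buf_copy, then delegates to the
// ByteBuffer-based write() overload above.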
int size = (int) libFuse.fuse_buf_size(buf);
FuseBufvec tmpVec = new FuseBufvec(Runtime.getSystemRuntime());
long tmpVecAddr = MemoryIO.getInstance().allocateMemory(Struct.size(tmpVec), false);
try {
tmpVec.useMemory(Pointer.wrap(Runtime.getSystemRuntime(), tmpVecAddr));
FuseBufvec.init(tmpVec, size);
var bb = UninitializedByteBuffer.allocate(size);
var mem = UninitializedByteBuffer.getAddress(bb);
tmpVec.buf.mem.set(mem);
tmpVec.buf.size.set(size);
int res = (int) libFuse.fuse_buf_copy(tmpVec, buf, 0);
if (res != size) {
Log.errorv("fuse_buf_copy failed: {0} != {1}", res, size);
return -ErrorCodes.ENOMEM();
}
return write(path, bb, off, fi);
} finally {
if (tmpVecAddr != 0) {
MemoryIO.getInstance().freeMemory(tmpVecAddr);
}
}
}
}

View File

@@ -1,6 +1,7 @@
package com.usatiuk.dhfs.fuse;
package com.usatiuk.dhfsfuse;
import com.google.protobuf.ByteOutput;
import com.usatiuk.utils.UnsafeAccessor;
import jnr.ffi.Pointer;
import java.nio.ByteBuffer;
@@ -9,14 +10,12 @@ import java.nio.MappedByteBuffer;
public class JnrPtrByteOutput extends ByteOutput {
private final Pointer _backing;
private final long _size;
private final JnrPtrByteOutputAccessors _accessors;
private long _pos;
public JnrPtrByteOutput(JnrPtrByteOutputAccessors accessors, Pointer backing, long size) {
public JnrPtrByteOutput(Pointer backing, long size) {
_backing = backing;
_size = size;
_pos = 0;
_accessors = accessors;
}
@Override
@@ -47,9 +46,9 @@ public class JnrPtrByteOutput extends ByteOutput {
if (value instanceof MappedByteBuffer mb) {
mb.load();
}
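// Direct buffers are copied straight into the target pointer via Unsafe;
// heap-backed buffers fall back to the array-based put() below.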
long addr = _accessors.getNioAccess().getBufferAddress(value) + value.position();
long addr = UnsafeAccessor.NIO.getBufferAddress(value) + value.position();
var out = _backing.address() + _pos;
_accessors.getUnsafe().copyMemory(addr, out, rem);
UnsafeAccessor.UNSAFE.copyMemory(addr, out, rem);
} else {
_backing.put(_pos, value.array(), value.arrayOffset() + value.position(), rem);
}

Some files were not shown because too many files have changed in this diff.