Mirror of https://github.com/usatiuk/dhfs.git, synced 2025-10-29 04:57:48 +01:00
Fs: file writing with less cpu wasted
@@ -367,16 +367,12 @@ public class DhfsFileServiceImpl implements DhfsFileService {
                 + offset + " " + data.size());
         }
 
-        if (size(fileUuid) < offset) {
-            truncate(fileUuid, offset);
-            file = remoteTx.getData(File.class, fileUuid).orElse(null);
-        }
 
         NavigableMap<Long, JObjectKey> removedChunks = new TreeMap<>();
 
         long realOffset = targetChunkAlignment >= 0 ? alignDown(offset, targetChunkAlignment) : offset;
         long writeEnd = offset + data.size();
         long start = realOffset;
+        long existingEnd = 0;
         ByteString pendingPrefix = ByteString.empty();
         ByteString pendingSuffix = ByteString.empty();
 
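The alignDown helper used above is not part of this diff, so the following is a minimal sketch of what a power-of-two align-down typically looks like, assuming targetChunkAlignment is the chunk-size exponent (both the body and that assumption are hypothetical):

    // Hypothetical sketch, not from this commit: round n down to a
    // multiple of 2^alignment using a two's-complement mask.
    static long alignDown(long n, long alignment) {
        return n & -(1L << alignment);
    }

With targetChunkAlignment = 12 (4 KiB chunks), alignDown(5000, 12) yields 4096, so realOffset snaps the write back to the start of the chunk containing offset.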
@@ -385,8 +381,8 @@ public class DhfsFileServiceImpl implements DhfsFileService {
             var curEntry = it.next();
             long curChunkStart = curEntry.getKey().key();
             var curChunkId = curEntry.getValue().ref();
-            long curChunkEnd = curChunkStart + getChunkSize(curChunkId);
-
+            long curChunkEnd = it.hasNext() ? it.peekNextKey().key() : curChunkStart + getChunkSize(curChunkId);
+            existingEnd = curChunkEnd;
             if (curChunkEnd <= realOffset) break;
 
             removedChunks.put(curEntry.getKey().key(), curChunkId);
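This hunk is one of the CPU savings the commit message refers to: the end of the current chunk is now read off the next map key via it.peekNextKey() instead of calling getChunkSize(curChunkId), which has to load the chunk body; getChunkSize is only paid for the last chunk. A standalone sketch of the same idea over a plain NavigableMap, where the names and the back-to-back chunk layout are illustrative assumptions:

    import java.util.NavigableMap;

    // Sketch: assuming chunks are laid out contiguously, a chunk ends where
    // its successor starts, so only the last chunk needs its body inspected.
    static long chunkEnd(NavigableMap<Long, byte[]> chunks, long chunkStart) {
        Long nextStart = chunks.higherKey(chunkStart);
        return nextStart != null ? nextStart
                                 : chunkStart + chunks.get(chunkStart).length;
    }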
@@ -408,12 +404,23 @@ public class DhfsFileServiceImpl implements DhfsFileService {
             }
         }
 
+        NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
+
+        if (existingEnd < offset) {
+            if (!pendingPrefix.isEmpty()) {
+                int diff = Math.toIntExact(offset - existingEnd);
+                pendingPrefix = pendingPrefix.concat(ByteString.copyFrom(new byte[diff]));
+            } else {
+                fillZeros(existingEnd, offset, newChunks);
+                start = offset;
+            }
+        }
 
         ByteString pendingWrites = pendingPrefix.concat(data).concat(pendingSuffix);
 
         int combinedSize = pendingWrites.size();
 
-        NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
-
         {
             int targetChunkSize = 1 << targetChunkAlignment;
             int cur = 0;
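This block replaces the truncate-on-gap call removed in the first hunk: when the write begins past the current end of data, the hole is zero-filled either by extending the in-memory pendingPrefix (when a partial chunk is already buffered) or by materializing zero chunks through fillZeros. A toy, self-contained demonstration of the prefix branch; all positions and contents below are made up:

    import com.google.protobuf.ByteString;

    public class PrefixGapDemo {
        public static void main(String[] args) {
            long existingEnd = 100, offset = 104;   // assumed file positions
            ByteString pendingPrefix = ByteString.copyFromUtf8("old!");
            int diff = Math.toIntExact(offset - existingEnd);
            // Extend the buffered prefix with zeros rather than creating
            // separate zero chunks for a hole this small.
            pendingPrefix = pendingPrefix.concat(ByteString.copyFrom(new byte[diff]));
            System.out.println(pendingPrefix.size()); // 8 = 4 old bytes + 4 zeros
        }
    }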
@@ -476,38 +483,7 @@ public class DhfsFileServiceImpl implements DhfsFileService {
             NavigableMap<Long, JObjectKey> newChunks = new TreeMap<>();
 
             if (curSize < length) {
-                long combinedSize = (length - curSize);
-
-                long start = curSize;
-
-                // Hack
-                HashMap<Long, ChunkData> zeroCache = new HashMap<>();
-
-                {
-                    long cur = 0;
-                    while (cur < combinedSize) {
-                        long end;
-
-                        if (targetChunkSize <= 0)
-                            end = combinedSize;
-                        else {
-                            if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
-                                end = cur + targetChunkSize;
-                            } else {
-                                end = combinedSize;
-                            }
-                        }
-
-                        if (!zeroCache.containsKey(end - cur))
-                            zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])));
-
-                        ChunkData newChunkData = zeroCache.get(end - cur);
-                        newChunks.put(start, newChunkData.key());
-
-                        start += newChunkData.data().size();
-                        cur = end;
-                    }
-                }
+                fillZeros(curSize, length, newChunks);
             } else {
                 // Pair<JMapLongKey, JMapEntry<JMapLongKey>> first;
                 Pair<JMapLongKey, JMapEntry<JMapLongKey>> last;
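The thirty-odd deleted lines are not gone: they reappear below as the extracted fillZeros helper, so truncate and write now share one zero-fill implementation. Worth noting in the deleted block is the zeroCache map: zero chunks of equal size are interchangeable, so one ChunkData is created per distinct size and its key reused for every hole of that size. The same dedup can be stated more compactly with computeIfAbsent; this is an equivalent reformulation, not what the commit does:

    // Equivalent to the containsKey/put pair in the loop: at most one zero
    // chunk is created per distinct size, then its key is reused.
    ChunkData zeroChunk = zeroCache.computeIfAbsent(end - cur,
            sz -> createChunk(UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(sz)])));
    newChunks.put(start, zeroChunk.key());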
@@ -572,6 +548,41 @@ public class DhfsFileServiceImpl implements DhfsFileService {
         });
     }
 
+    private void fillZeros(long fillStart, long length, NavigableMap<Long, JObjectKey> newChunks) {
+        long combinedSize = (length - fillStart);
+
+        long start = fillStart;
+
+        // Hack
+        HashMap<Long, ChunkData> zeroCache = new HashMap<>();
+
+        {
+            long cur = 0;
+            while (cur < combinedSize) {
+                long end;
+
+                if (targetChunkSize <= 0)
+                    end = combinedSize;
+                else {
+                    if ((combinedSize - cur) > (targetChunkSize * 1.5)) {
+                        end = cur + targetChunkSize;
+                    } else {
+                        end = combinedSize;
+                    }
+                }
+
+                if (!zeroCache.containsKey(end - cur))
+                    zeroCache.put(end - cur, createChunk(UnsafeByteOperations.unsafeWrap(new byte[Math.toIntExact(end - cur)])));
+
+                ChunkData newChunkData = zeroCache.get(end - cur);
+                newChunks.put(start, newChunkData.key());
+
+                start += newChunkData.data().size();
+                cur = end;
+            }
+        }
+    }
+
 
     @Override
     public String readlink(JObjectKey uuid) {
         return jObjectTxManager.executeTx(() -> {
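The splitting rule inside the while loop deserves a note: it emits targetChunkSize-sized chunks only while more than 1.5x the target remains, then lets the final chunk absorb the tail, avoiding a tiny trailing chunk. Isolated as a pure function for illustration (the extraction and its name are hypothetical):

    // Hypothetical extraction of the split rule above. With
    // targetChunkSize = 4096 and combinedSize = 10000 it yields ends at
    // 4096 and then 10000 (one 5904-byte tail chunk) instead of leaving
    // a 1808-byte fragment after 8192.
    static long nextEnd(long cur, long combinedSize, int targetChunkSize) {
        if (targetChunkSize <= 0) return combinedSize;
        return (combinedSize - cur) > (targetChunkSize * 1.5)
                ? cur + targetChunkSize
                : combinedSize;
    }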
@@ -169,6 +169,7 @@ public abstract class DhfsFileServiceSimpleTestImpl {
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, fileService.read(uuid, 0, 10).get().toByteArray());
 
            fileService.truncate(uuid, 20);
+           Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fileService.read(uuid, 0, 20).get().toByteArray());
            fileService.write(uuid, 10, new byte[]{11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
            Assertions.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, fileService.read(uuid, 0, 20).get().toByteArray());
        } finally {
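The added assertion pins down the behavior the refactor must preserve: truncating from 10 to 20 bytes exposes zeros in [10, 20), which the following write then overwrites. A companion assertion one might add (hypothetical, not in this commit) to exercise the new gap-fill path in write directly:

    // Hypothetical: writing past the end should leave zeros in the hole,
    // here [20, 25), via the zero-fill logic added in this commit.
    fileService.write(uuid, 25, new byte[]{42});
    Assertions.assertArrayEquals(new byte[]{0, 0, 0, 0, 0},
            fileService.read(uuid, 20, 5).get().toByteArray());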