From 40925034e1badaffaff89e2d9052ae8690480384 Mon Sep 17 00:00:00 2001
From: Alex Barney
Date: Thu, 2 Dec 2021 16:15:44 -0700
Subject: [PATCH] Update IndirectStorage and SparseStorage for 13.0

---
 src/LibHac/FsSystem/Aes128CtrExStorage.cs | 91 +-
 src/LibHac/FsSystem/BucketTree.cs | 1413 ++++++++++++-----
 src/LibHac/FsSystem/BucketTree2.cs | 1298 ---------------
 src/LibHac/FsSystem/BucketTreeBuilder.cs | 175 +-
 src/LibHac/FsSystem/BucketTreeBuilder2.cs | 314 ----
 src/LibHac/FsSystem/IndirectStorage.cs | 603 ++++---
 src/LibHac/FsSystem/NcaUtils/Nca.cs | 6 +-
 src/LibHac/FsSystem/SparseStorage.cs | 132 ++
 .../FsSystem/BucketTreeBuilderTests.cs | 16 +-
 .../FsSystem/BucketTreeCreator.cs | 8 +-
 .../LibHac.Tests/FsSystem/BucketTreeTests.cs | 16 +-
 .../FsSystem/IndirectStorageCreator.cs | 154 ++
 .../FsSystem/IndirectStorageTests.cs | 356 +++++
 13 files changed, 2205 insertions(+), 2377 deletions(-)
 delete mode 100644 src/LibHac/FsSystem/BucketTree2.cs
 delete mode 100644 src/LibHac/FsSystem/BucketTreeBuilder2.cs
 create mode 100644 src/LibHac/FsSystem/SparseStorage.cs
 create mode 100644 tests/LibHac.Tests/FsSystem/IndirectStorageCreator.cs
 create mode 100644 tests/LibHac.Tests/FsSystem/IndirectStorageTests.cs

diff --git a/src/LibHac/FsSystem/Aes128CtrExStorage.cs b/src/LibHac/FsSystem/Aes128CtrExStorage.cs
index b753c825..decc5e66 100644
--- a/src/LibHac/FsSystem/Aes128CtrExStorage.cs
+++ b/src/LibHac/FsSystem/Aes128CtrExStorage.cs
@@ -25,7 +25,14 @@ public class Aes128CtrExStorage : Aes128CtrStorage
         int entryCount, byte[] key, byte[] counter, bool leaveOpen)
         : base(baseStorage, key, counter, leaveOpen)
     {
-        Result rc = Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
+        nodeStorage.GetSize(out long nodeStorageSize).ThrowIfFailure();
+        entryStorage.GetSize(out long entryStorageSize).ThrowIfFailure();
+
+        using var valueNodeStorage = new ValueSubStorage(nodeStorage, 0, nodeStorageSize);
+        using var valueEntryStorage = new ValueSubStorage(entryStorage, 0, entryStorageSize);
+
+        Result rc = Table.Initialize(new ArrayPoolMemoryResource(), in valueNodeStorage, in valueEntryStorage, NodeSize,
+            Unsafe.SizeOf<Entry>(), entryCount);
         rc.ThrowIfFailure();
     }
 
@@ -34,53 +41,55 @@ public class Aes128CtrExStorage : Aes128CtrStorage
         if (destination.Length == 0)
             return Result.Success;
 
-        var visitor = new BucketTree.Visitor();
+        Result rc = Table.GetOffsets(out BucketTree.Offsets offsets);
+        if (rc.IsFailure()) return rc.Miss();
 
-        try
+        if (!offsets.IsInclude(offset, destination.Length))
+            return ResultFs.OutOfRange.Log();
+
+        using var visitor = new BucketTree.Visitor();
+
+        rc = Table.Find(ref visitor.Ref, offset);
+        if (rc.IsFailure()) return rc;
+
+        long inPos = offset;
+        int outPos = 0;
+        int remaining = destination.Length;
+
+        while (remaining > 0)
         {
-            Result rc = Table.Find(ref visitor, offset);
-            if (rc.IsFailure()) return rc;
+            var currentEntry = visitor.Get<Entry>();
 
-            long inPos = offset;
-            int outPos = 0;
-            int remaining = destination.Length;
-
-            while (remaining > 0)
+            // Get and validate the next entry offset
+            long nextEntryOffset;
+            if (visitor.CanMoveNext())
             {
-                var currentEntry = visitor.Get<Entry>();
+                rc = visitor.MoveNext();
+                if (rc.IsFailure()) return rc;
 
-                // Get and validate the next entry offset
-                long nextEntryOffset;
-                if (visitor.CanMoveNext())
-                {
-                    rc = visitor.MoveNext();
-                    if (rc.IsFailure()) return rc;
-
-                    nextEntryOffset = visitor.Get<Entry>().Offset;
-                    if (!Table.Includes(nextEntryOffset))
-                        return ResultFs.InvalidIndirectEntryOffset.Log();
-                }
-                else
-                {
-                    nextEntryOffset =
Table.GetEnd(); - } - - int bytesToRead = (int)Math.Min(nextEntryOffset - inPos, remaining); - - lock (_locker) - { - UpdateCounterSubsection((uint)currentEntry.Generation); - - rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead)); - if (rc.IsFailure()) return rc; - } - - outPos += bytesToRead; - inPos += bytesToRead; - remaining -= bytesToRead; + nextEntryOffset = visitor.Get().Offset; + if (!offsets.IsInclude(nextEntryOffset)) + return ResultFs.InvalidIndirectEntryOffset.Log(); } + else + { + nextEntryOffset = offsets.EndOffset; + } + + int bytesToRead = (int)Math.Min(nextEntryOffset - inPos, remaining); + + lock (_locker) + { + UpdateCounterSubsection((uint)currentEntry.Generation); + + rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead)); + if (rc.IsFailure()) return rc; + } + + outPos += bytesToRead; + inPos += bytesToRead; + remaining -= bytesToRead; } - finally { visitor.Dispose(); } return Result.Success; } diff --git a/src/LibHac/FsSystem/BucketTree.cs b/src/LibHac/FsSystem/BucketTree.cs index 420083c0..f82eb496 100644 --- a/src/LibHac/FsSystem/BucketTree.cs +++ b/src/LibHac/FsSystem/BucketTree.cs @@ -1,16 +1,21 @@ using System; -using System.Buffers; using System.Buffers.Binary; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using LibHac.Common; using LibHac.Diag; using LibHac.Fs; +using LibHac.Os; using LibHac.Util; +using Buffer = LibHac.Mem.Buffer; namespace LibHac.FsSystem; -public partial class BucketTree +/// +/// Allows searching and iterating the entries in a bucket tree data structure. +/// +/// Based on FS 13.1.0 (nnSdk 13.4.0) +public partial class BucketTree : IDisposable { private const uint ExpectedMagic = 0x52544B42; // BKTR private const int MaxVersion = 1; @@ -18,23 +23,476 @@ public partial class BucketTree private const int NodeSizeMin = 1024; private const int NodeSizeMax = 1024 * 512; + private static readonly int BufferAlignment = sizeof(long); + private static int NodeHeaderSize => Unsafe.SizeOf(); - private SubStorage NodeStorage { get; set; } - private SubStorage EntryStorage { get; set; } + private ValueSubStorage _nodeStorage; + private ValueSubStorage _entryStorage; private NodeBuffer _nodeL1; - private long NodeSize { get; set; } - private long EntrySize { get; set; } - private int OffsetCount { get; set; } - private int EntrySetCount { get; set; } - private long StartOffset { get; set; } - private long EndOffset { get; set; } + private long _nodeSize; + private long _entrySize; + private int _entryCount; + private int _offsetCount; + private int _entrySetCount; + private OffsetCache _offsetCache; - public Result Initialize(SubStorage nodeStorage, SubStorage entryStorage, int nodeSize, int entrySize, - int entryCount) + public struct ContinuousReadingInfo { + private long _readSize; + private int _skipCount; + private bool _isDone; + + public readonly bool CanDo() => _readSize != 0; + public bool CheckNeedScan() => --_skipCount <= 0; + public readonly bool IsDone() => _isDone; + + public void Done() + { + _readSize = 0; + _isDone = true; + } + + public readonly long GetReadSize() => _readSize; + public void SetReadSize(long readSize) => _readSize = readSize; + + public void Reset() + { + _readSize = 0; + _skipCount = 0; + _isDone = false; + } + + public void SetSkipCount(int count) + { + Assert.SdkRequiresGreaterEqual(count, 0); + + _skipCount = count; + } + } + + public interface IContinuousReadingEntry + { + int FragmentSizeMax { get; } + + long GetVirtualOffset(); + long GetPhysicalOffset(); + 
bool IsFragment(); + } + + private struct ContinuousReadingParam where TEntry : unmanaged, IContinuousReadingEntry + { + public long Offset; + public long Size; + public NodeHeader EntrySet; + public int EntryIndex; + public Offsets TreeOffsets; + public TEntry Entry; + } + + public struct NodeHeader + { + public int Index; + public int EntryCount; + public long OffsetEnd; + + public Result Verify(int nodeIndex, long nodeSize, long entrySize) + { + if (Index != nodeIndex) + return ResultFs.InvalidBucketTreeNodeIndex.Log(); + + if (entrySize == 0 || nodeSize < entrySize + NodeHeaderSize) + return ResultFs.InvalidSize.Log(); + + long maxEntryCount = (nodeSize - NodeHeaderSize) / entrySize; + + if (EntryCount <= 0 || maxEntryCount < EntryCount) + return ResultFs.InvalidBucketTreeNodeEntryCount.Log(); + + if (OffsetEnd < 0) + return ResultFs.InvalidBucketTreeNodeOffset.Log(); + + return Result.Success; + } + } + + [NonCopyable] + private struct NodeBuffer : IDisposable + { + private MemoryResource _allocator; + private Buffer _header; + + public void Dispose() + { + Assert.SdkAssert(_header.IsNull); + } + + public readonly MemoryResource GetAllocator() => _allocator; + + public bool Allocate(MemoryResource allocator, int nodeSize) + { + Assert.SdkRequires(_header.IsNull); + + _allocator = allocator; + _header = allocator.Allocate(nodeSize, BufferAlignment); + + return !_header.IsNull; + } + + public void Free() + { + if (!_header.IsNull) + { + _allocator.Deallocate(ref _header, BufferAlignment); + _header = Buffer.Empty; + } + + _allocator = null; + } + + public void FillZero() + { + if (!_header.IsNull) + { + _header.Span.Clear(); + } + } + + public readonly ref NodeHeader GetHeader() + { + Assert.SdkRequiresGreaterEqual(_header.Length * sizeof(long), Unsafe.SizeOf()); + + return ref Unsafe.As(ref _header.Span[0]); + } + + public readonly Span GetBuffer() + { + return _header.Span; + } + + public readonly BucketTreeNode GetNode() where TEntry : unmanaged + { + return new BucketTreeNode(GetBuffer()); + } + } + + private struct StorageNode + { + private Offset _start; + private int _count; + private int _index; + + public StorageNode(long offset, long size, int count) + { + _start = new Offset(offset + NodeHeaderSize, (int)size); + _count = count; + _index = -1; + } + + public StorageNode(long size, int count) + { + _start = new Offset(NodeHeaderSize, (int)size); + _count = count; + _index = -1; + } + + public readonly int GetIndex() => _index; + + public void Find(ReadOnlySpan buffer, long virtualAddress) + { + int end = _count; + Offset pos = _start; + + while (end > 0) + { + int half = end / 2; + Offset mid = pos + half; + + long offset = BinaryPrimitives.ReadInt64LittleEndian(buffer.Slice((int)mid.Get())); + + if (offset <= virtualAddress) + { + pos = mid + 1; + end -= half + 1; + } + else + { + end = half; + } + } + + _index = (int)(pos - _start) - 1; + } + + public Result Find(in ValueSubStorage storage, long virtualAddress) + { + int end = _count; + Offset pos = _start; + + while (end > 0) + { + int half = end / 2; + Offset mid = pos + half; + + long offset = 0; + Result rc = storage.Read(mid.Get(), SpanHelpers.AsByteSpan(ref offset)); + if (rc.IsFailure()) return rc.Miss(); + + if (offset <= virtualAddress) + { + pos = mid + 1; + end -= half + 1; + } + else + { + end = half; + } + } + + _index = (int)(pos - _start) - 1; + return Result.Success; + } + + private readonly struct Offset + { + private readonly long _offset; + private readonly int _stride; + + public Offset(long offset, int 
stride) + { + _offset = offset; + _stride = stride; + } + + public long Get() => _offset; + + public static Offset operator ++(Offset left) => left + 1; + public static Offset operator --(Offset left) => left - 1; + + public static Offset operator +(Offset left, long right) => new Offset(left._offset + right * left._stride, left._stride); + public static Offset operator -(Offset left, long right) => new Offset(left._offset - right * left._stride, left._stride); + + public static long operator -(Offset left, Offset right) => + (left._offset - right._offset) / left._stride; + + public static bool operator ==(Offset left, Offset right) => left._offset == right._offset; + public static bool operator !=(Offset left, Offset right) => left._offset != right._offset; + + public bool Equals(Offset other) => _offset == other._offset; + public override bool Equals(object obj) => obj is Offset other && Equals(other); + public override int GetHashCode() => _offset.GetHashCode(); + } + } + + private struct OffsetCache + { + public OffsetCache() + { + Mutex = new SdkMutexType(); + Mutex.Initialize(); + IsInitialized = false; + Offsets.StartOffset = -1; + Offsets.EndOffset = -1; + } + + public bool IsInitialized; + public Offsets Offsets; + public SdkMutexType Mutex; + } + + public struct Offsets + { + public long StartOffset; + public long EndOffset; + + public readonly bool IsInclude(long offset) + { + return StartOffset <= offset && offset < EndOffset; + } + + public readonly bool IsInclude(long offset, long size) + { + return size > 0 && StartOffset <= offset && size <= EndOffset - offset; + } + } + + public struct Header + { + public uint Magic; + public uint Version; + public int EntryCount; +#pragma warning disable 414 + private int _reserved; +#pragma warning restore 414 + + public void Format(int entryCount) + { + Assert.SdkRequiresLessEqual(0, entryCount); + + Magic = ExpectedMagic; + Version = MaxVersion; + EntryCount = entryCount; + _reserved = 0; + } + + public readonly Result Verify() + { + if (Magic != ExpectedMagic) + return ResultFs.InvalidBucketTreeSignature.Log(); + + if (EntryCount < 0) + return ResultFs.InvalidBucketTreeEntryCount.Log(); + + if (Version > MaxVersion) + return ResultFs.UnsupportedVersion.Log(); + + return Result.Success; + } + } + + public readonly ref struct BucketTreeNode where TEntry : unmanaged + { + private readonly Span _buffer; + + public BucketTreeNode(Span buffer) + { + _buffer = buffer; + + Assert.SdkRequiresGreaterEqual(_buffer.Length, Unsafe.SizeOf()); + Assert.SdkRequiresGreaterEqual(_buffer.Length, + Unsafe.SizeOf() + GetHeader().EntryCount * Unsafe.SizeOf()); + } + + public int GetCount() => GetHeader().EntryCount; + + public ReadOnlySpan GetArray() => GetWritableArray(); + internal Span GetWritableArray() => GetWritableArray(); + + public long GetBeginOffset() => GetArray()[0]; + public long GetEndOffset() => GetHeader().OffsetEnd; + public long GetL2BeginOffset() => GetArray()[GetCount()]; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ReadOnlySpan GetArray() where TElement : unmanaged + { + return GetWritableArray(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private Span GetWritableArray() where TElement : unmanaged + { + return MemoryMarshal.Cast(_buffer.Slice(Unsafe.SizeOf())); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal ref NodeHeader GetHeader() + { + return ref Unsafe.As(ref MemoryMarshal.GetReference(_buffer)); + } + } + + private static int GetEntryCount(long nodeSize, long entrySize) 
+    {
+        return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / entrySize);
+    }
+
+    private static int GetOffsetCount(long nodeSize)
+    {
+        return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / sizeof(long));
+    }
+
+    private static int GetEntrySetCount(long nodeSize, long entrySize, int entryCount)
+    {
+        int entryCountPerNode = GetEntryCount(nodeSize, entrySize);
+        return BitUtil.DivideUp(entryCount, entryCountPerNode);
+    }
+
+    private static int GetNodeL2Count(long nodeSize, long entrySize, int entryCount)
+    {
+        int offsetCountPerNode = GetOffsetCount(nodeSize);
+        int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
+
+        if (entrySetCount <= offsetCountPerNode)
+            return 0;
+
+        int nodeL2Count = BitUtil.DivideUp(entrySetCount, offsetCountPerNode);
+        Abort.DoAbortUnless(nodeL2Count <= offsetCountPerNode);
+
+        return BitUtil.DivideUp(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1)), offsetCountPerNode);
+    }
+
+    public static int QueryHeaderStorageSize() => Unsafe.SizeOf<Header>
(); + + public static long QueryNodeStorageSize(long nodeSize, long entrySize, int entryCount) + { + Assert.SdkRequiresLessEqual(sizeof(long), entrySize); + Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); + Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); + Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); + Assert.SdkRequiresLessEqual(0, entryCount); + + if (entryCount <= 0) + return 0; + + return (1 + GetNodeL2Count(nodeSize, entrySize, entryCount)) * nodeSize; + } + + public static long QueryEntryStorageSize(long nodeSize, long entrySize, int entryCount) + { + Assert.SdkRequiresLessEqual(sizeof(long), entrySize); + Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); + Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); + Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); + Assert.SdkRequiresLessEqual(0, entryCount); + + if (entryCount <= 0) + return 0; + + return GetEntrySetCount(nodeSize, entrySize, entryCount) * nodeSize; + } + + private static long GetBucketTreeEntryOffset(long entrySetOffset, long entrySize, int entryIndex) + { + return entrySetOffset + Unsafe.SizeOf() + entryIndex * entrySize; + } + + private static long GetBucketTreeEntryOffset(int entrySetIndex, long nodeSize, long entrySize, int entryIndex) + { + return GetBucketTreeEntryOffset(entrySetIndex * nodeSize, entrySize, entryIndex); + } + + public BucketTree() + { + _offsetCache = new OffsetCache(); + } + + public void Dispose() + { + FinalizeObject(); + _nodeL1.Dispose(); + } + + public MemoryResource GetAllocator() => _nodeL1.GetAllocator(); + + public int GetEntryCount() => _entryCount; + + public Result GetOffsets(out Offsets offsets) + { + UnsafeHelpers.SkipParamInit(out offsets); + + Result rc = EnsureOffsetCache(); + if (rc.IsFailure()) return rc.Miss(); + + offsets = _offsetCache.Offsets; + return Result.Success; + } + + public Result Initialize(MemoryResource allocator, in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, + int nodeSize, int entrySize, int entryCount) + { + Assert.SdkRequiresNotNull(allocator); Assert.SdkRequiresLessEqual(sizeof(long), entrySize); Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); @@ -46,7 +504,7 @@ public partial class BucketTree return ResultFs.InvalidArgument.Log(); // Allocate node. 
- if (!_nodeL1.Allocate(nodeSize)) + if (!_nodeL1.Allocate(allocator, nodeSize)) return ResultFs.BufferAllocationFailed.Log(); bool needFree = true; @@ -80,14 +538,15 @@ public partial class BucketTree if (startOffset < 0 || startOffset > node.GetBeginOffset() || startOffset >= endOffset) return ResultFs.InvalidBucketTreeEntryOffset.Log(); - NodeStorage = nodeStorage; - EntryStorage = entryStorage; - NodeSize = nodeSize; - EntrySize = entrySize; - OffsetCount = offsetCount; - EntrySetCount = entrySetCount; - StartOffset = startOffset; - EndOffset = endOffset; + _nodeStorage.Set(in nodeStorage); + _entryStorage.Set(in entryStorage); + _nodeSize = nodeSize; + _entrySize = entrySize; + _offsetCount = offsetCount; + _entrySetCount = entrySetCount; + _offsetCache.IsInitialized = true; + _offsetCache.Offsets.StartOffset = startOffset; + _offsetCache.Offsets.EndOffset = endOffset; needFree = false; @@ -100,23 +559,41 @@ public partial class BucketTree } } - public bool IsInitialized() => NodeSize > 0; - public bool IsEmpty() => EntrySize == 0; - - public long GetStart() => StartOffset; - public long GetEnd() => EndOffset; - public long GetSize() => EndOffset - StartOffset; - - public bool Includes(long offset) + public void Initialize(long nodeSize, long endOffset) { - return StartOffset <= offset && offset < EndOffset; + Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); + Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); + Assert.SdkRequiresLessEqual(0, endOffset); + Assert.SdkRequires(!IsInitialized()); + + _nodeSize = nodeSize; + _offsetCache.IsInitialized = true; + _offsetCache.Offsets.StartOffset = 0; + _offsetCache.Offsets.EndOffset = endOffset; } - public bool Includes(long offset, long size) + public void FinalizeObject() { - return size > 0 && StartOffset <= offset && size <= EndOffset - offset; + if (IsInitialized()) + { + _nodeStorage.Dispose(); + _entryStorage.Dispose(); + + _nodeL1.Free(); + + _nodeSize = 0; + _entrySize = 0; + _entryCount = 0; + _offsetCount = 0; + _entrySetCount = 0; + _offsetCache.IsInitialized = false; + _offsetCache.Offsets = default; + } } + public bool IsInitialized() => _nodeSize > 0; + public bool IsEmpty() => _entrySize == 0; + public Result Find(ref Visitor visitor, long virtualAddress) { Assert.SdkRequires(IsInitialized()); @@ -127,231 +604,244 @@ public partial class BucketTree if (IsEmpty()) return ResultFs.OutOfRange.Log(); - Result rc = visitor.Initialize(this); - if (rc.IsFailure()) return rc; + Result rc = GetOffsets(out Offsets offsets); + if (rc.IsFailure()) return rc.Miss(); + + rc = visitor.Initialize(this, in offsets); + if (rc.IsFailure()) return rc.Miss(); return visitor.Find(virtualAddress); } - public static int QueryHeaderStorageSize() => 16; - - public static long QueryNodeStorageSize(long nodeSize, long entrySize, int entryCount) + public Result InvalidateCache() { - if (entryCount <= 0) - return 0; + Result rc = _nodeStorage.OperateRange(OperationId.InvalidateCache, 0, long.MaxValue); + if (rc.IsFailure()) return rc.Miss(); - return (1 + GetNodeL2Count(nodeSize, entrySize, entryCount)) * nodeSize; + rc = _entryStorage.OperateRange(OperationId.InvalidateCache, 0, long.MaxValue); + if (rc.IsFailure()) return rc.Miss(); + + _offsetCache.IsInitialized = false; + return Result.Success; } - public static long QueryEntryStorageSize(long nodeSize, long entrySize, int entryCount) + private Result EnsureOffsetCache() { - if (entryCount <= 0) - return 0; - - return GetEntrySetCount(nodeSize, entrySize, entryCount) * nodeSize; - 
} - - private static int GetEntryCount(long nodeSize, long entrySize) - { - return (int)((nodeSize - 16) / entrySize); - } - - private static int GetOffsetCount(long nodeSize) - { - return (int)((nodeSize - 16) / sizeof(long)); - } - - private static int GetEntrySetCount(long nodeSize, long entrySize, int entryCount) - { - int entryCountPerNode = GetEntryCount(nodeSize, entrySize); - uint divisor = (uint)entryCountPerNode; - return (int)(((uint)entryCount + divisor - 1) / divisor); - } - - public static int GetNodeL2Count(long nodeSize, long entrySize, int entryCount) - { - int offsetCountPerNode = GetOffsetCount(nodeSize); - int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount); - - if (entrySetCount <= offsetCountPerNode) - return 0; - - uint divisor1 = (uint)offsetCountPerNode; - int nodeL2Count = (int)(((uint)entrySetCount + divisor1 - 1) / divisor1); - Assert.SdkLessEqual(nodeL2Count, offsetCountPerNode); - - uint divisor = (uint)offsetCountPerNode; - return (int)(((uint)(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1))) + divisor - 1) / divisor); - } - - private static long GetBucketTreeEntryOffset(long entrySetOffset, long entrySize, int entryIndex) - { - return entrySetOffset + Unsafe.SizeOf() + entryIndex * entrySize; - } - - private static long GetBucketTreeEntryOffset(int entrySetIndex, long nodeSize, long entrySize, int entryIndex) - { - return GetBucketTreeEntryOffset(entrySetIndex * nodeSize, entrySize, entryIndex); - } - - private bool IsExistL2() => OffsetCount < EntrySetCount; - private bool IsExistOffsetL2OnL1() => IsExistL2() && _nodeL1.GetHeader().Count < OffsetCount; - - private long GetEntrySetIndex(int nodeIndex, int offsetIndex) - { - return (OffsetCount - _nodeL1.GetHeader().Count) + (OffsetCount * nodeIndex) + offsetIndex; - } - - public struct Header - { - public uint Magic; - public uint Version; - public int EntryCount; -#pragma warning disable 414 - private int _reserved; -#pragma warning restore 414 - - public void Format(int entryCount) - { - Magic = ExpectedMagic; - Version = MaxVersion; - EntryCount = entryCount; - _reserved = 0; - } - - public Result Verify() - { - if (Magic != ExpectedMagic) - return ResultFs.InvalidBucketTreeSignature.Log(); - - if (EntryCount < 0) - return ResultFs.InvalidBucketTreeEntryCount.Log(); - - if (Version > MaxVersion) - return ResultFs.UnsupportedVersion.Log(); - + if (_offsetCache.IsInitialized) return Result.Success; + + using ScopedLock lk = ScopedLock.Lock(ref _offsetCache.Mutex); + + if (_offsetCache.IsInitialized) + return Result.Success; + + Result rc = _nodeStorage.Read(0, _nodeL1.GetBuffer()); + if (rc.IsFailure()) return rc.Miss(); + + rc = _nodeL1.GetHeader().Verify(0, _nodeSize, sizeof(long)); + if (rc.IsFailure()) return rc.Miss(); + + BucketTreeNode node = _nodeL1.GetNode(); + + long startOffset; + if (_offsetCount < _entrySetCount && node.GetCount() < _offsetCount) + { + startOffset = node.GetL2BeginOffset(); } + else + { + startOffset = node.GetBeginOffset(); + } + + if (startOffset < 0 || startOffset > node.GetBeginOffset()) + return ResultFs.InvalidBucketTreeEntryOffset.Log(); + + long endOffset = node.GetEndOffset(); + + if (startOffset >= endOffset) + return ResultFs.InvalidBucketTreeEntryOffset.Log(); + + _offsetCache.IsInitialized = true; + _offsetCache.Offsets.StartOffset = startOffset; + _offsetCache.Offsets.EndOffset = endOffset; + + return Result.Success; } - public struct NodeHeader + private bool IsExistL2() => _offsetCount < _entrySetCount; + private bool 
IsExistOffsetL2OnL1() => IsExistL2() && _nodeL1.GetHeader().EntryCount < _offsetCount; + + private int GetEntrySetIndex(int nodeIndex, int offsetIndex) { - public int Index; - public int Count; - public long Offset; + return (_offsetCount - _nodeL1.GetHeader().EntryCount) + (_offsetCount * nodeIndex) + offsetIndex; + } - public Result Verify(int nodeIndex, long nodeSize, long entrySize) + private Result ScanContinuousReading(out ContinuousReadingInfo info, + in ContinuousReadingParam param) where TEntry : unmanaged, IContinuousReadingEntry + { + Assert.SdkRequires(IsInitialized()); + Assert.Equal(Unsafe.SizeOf(), _entrySize); + + info = new ContinuousReadingInfo(); + + // If there's nothing to read, we're done. + if (param.Size == 0) + return Result.Success; + + // If we're reading a fragment, we're done. + // IsFragment() is a readonly function, but we can't specify that on interfaces + // so cast the readonly params to non-readonly + if (Unsafe.AsRef(in param.Entry).IsFragment()) + return Result.Success; + + // Validate the first entry. + TEntry entry = param.Entry; + long currentOffset = param.Offset; + + if (entry.GetVirtualOffset() > currentOffset) + return ResultFs.OutOfRange.Log(); + + // Create a pooled buffer for our scan. + var pool = new PooledBuffer((int)_nodeSize, 1); + var buffer = Span.Empty; + + Result rc = _entryStorage.GetSize(out long entryStorageSize); + if (rc.IsFailure()) return rc.Miss(); + + // Read the node. + if (_nodeSize <= pool.GetSize()) { - if (Index != nodeIndex) - return ResultFs.InvalidBucketTreeNodeIndex.Log(); + buffer = pool.GetBuffer(); + long ofs = param.EntrySet.Index * _nodeSize; - if (entrySize == 0 || nodeSize < entrySize + NodeHeaderSize) - return ResultFs.InvalidSize.Log(); - - long maxEntryCount = (nodeSize - NodeHeaderSize) / entrySize; - - if (Count <= 0 || maxEntryCount < Count) + if (_nodeSize + ofs > entryStorageSize) return ResultFs.InvalidBucketTreeNodeEntryCount.Log(); - if (Offset < 0) - return ResultFs.InvalidBucketTreeNodeOffset.Log(); - - return Result.Success; - } - } - - private struct NodeBuffer - { - // Use long to ensure alignment - private long[] _header; - - public bool Allocate(int nodeSize) - { - Assert.SdkRequiresNull(_header); - - _header = new long[nodeSize / sizeof(long)]; - - return _header != null; + rc = _entryStorage.Read(ofs, buffer.Slice(0, (int)_nodeSize)); + if (rc.IsFailure()) return rc.Miss(); } - public void Free() - { - _header = null; - } + // Calculate extents. + long endOffset = param.Size + currentOffset; + long physicalOffset = entry.GetPhysicalOffset(); - public void FillZero() + // Start merge tracking. + long mergeSize = 0; + long readableSize = 0; + bool merged = false; + + // Iterate. + int entryIndex = param.EntryIndex; + int entryCount = param.EntrySet.EntryCount; + + while (entryIndex < entryCount) { - if (_header != null) + // If we're past the end, we're done. + if (endOffset <= currentOffset) + break; + + // Validate the entry offset. + long entryOffset = entry.GetVirtualOffset(); + if (entryOffset > currentOffset) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Get the next entry. 
+ TEntry nextEntry = default; + long nextEntryOffset; + + if (entryIndex + 1 < entryCount) { - Array.Fill(_header, 0); + if (buffer.IsEmpty) + { + long ofs = GetBucketTreeEntryOffset(param.EntrySet.Index, _nodeSize, _entrySize, entryIndex + 1); + + if (_entrySize + ofs > entryStorageSize) + return ResultFs.InvalidBucketTreeEntryOffset.Log(); + + rc = _entryStorage.Read(ofs, SpanHelpers.AsByteSpan(ref nextEntry)); + if (rc.IsFailure()) return rc.Miss(); + } + else + { + long ofs = GetBucketTreeEntryOffset(0, _entrySize, entryIndex + 1); + buffer.Slice((int)ofs, (int)_entrySize).CopyTo(SpanHelpers.AsByteSpan(ref nextEntry)); + } + + nextEntryOffset = nextEntry.GetVirtualOffset(); + + if (!param.TreeOffsets.IsInclude(nextEntryOffset)) + return ResultFs.InvalidIndirectEntryOffset.Log(); } + else + { + nextEntryOffset = param.EntrySet.OffsetEnd; + } + + // Validate the next entry offset. + if (currentOffset >= nextEntryOffset) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Determine the much data there is. + long dataSize = nextEntryOffset - currentOffset; + Assert.SdkLess(0, dataSize); + + // Determine how much data we should read. + long remainingSize = endOffset - currentOffset; + long readSize = Math.Min(remainingSize, dataSize); + Assert.SdkLessEqual(readSize, param.Size); + + // Update our merge tracking. + if (entry.IsFragment()) + { + // If we can't merge, stop looping. + if (readSize >= entry.FragmentSizeMax || remainingSize <= dataSize) + break; + + // Otherwise, add the current size to the merge size. + mergeSize += readSize; + } + else + { + // If we can't merge, stop looping. + if (physicalOffset != entry.GetPhysicalOffset()) + break; + + // Add the size to the readable amount. + readableSize += readSize + mergeSize; + Assert.SdkLessEqual(readableSize, param.Size); + + // Update whether we've merged. + merged |= mergeSize > 0; + mergeSize = 0; + } + + // Advance. + currentOffset += readSize; + Assert.SdkLessEqual(currentOffset, endOffset); + + physicalOffset += nextEntryOffset - entryOffset; + entry = nextEntry; + entryIndex++; } - public ref NodeHeader GetHeader() + // If we merged, set our readable size. 
+ if (merged) { - Assert.SdkRequiresGreaterEqual(_header.Length * sizeof(long), Unsafe.SizeOf()); - - return ref Unsafe.As(ref _header[0]); + info.SetReadSize(readableSize); } - public Span GetBuffer() - { - return MemoryMarshal.AsBytes(_header.AsSpan()); - } + info.SetSkipCount(entryIndex - param.EntryIndex); - public BucketTreeNode GetNode() where TEntry : unmanaged - { - return new BucketTreeNode(GetBuffer()); - } - } - - public readonly ref struct BucketTreeNode where TEntry : unmanaged - { - private readonly Span _buffer; - - public BucketTreeNode(Span buffer) - { - _buffer = buffer; - - Assert.SdkRequiresGreaterEqual(_buffer.Length, Unsafe.SizeOf()); - Assert.SdkRequiresGreaterEqual(_buffer.Length, - Unsafe.SizeOf() + GetHeader().Count * Unsafe.SizeOf()); - } - - public int GetCount() => GetHeader().Count; - - public ReadOnlySpan GetArray() => GetWritableArray(); - internal Span GetWritableArray() => GetWritableArray(); - - public long GetBeginOffset() => GetArray()[0]; - public long GetEndOffset() => GetHeader().Offset; - public long GetL2BeginOffset() => GetArray()[GetCount()]; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ReadOnlySpan GetArray() where TElement : unmanaged - { - return GetWritableArray(); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private Span GetWritableArray() where TElement : unmanaged - { - return MemoryMarshal.Cast(_buffer.Slice(Unsafe.SizeOf())); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal ref NodeHeader GetHeader() - { - return ref Unsafe.As(ref MemoryMarshal.GetReference(_buffer)); - } + return Result.Success; } public ref struct Visitor { - private BucketTree Tree { get; set; } - private byte[] Entry { get; set; } - private int EntryIndex { get; set; } - private int EntrySetCount { get; set; } + private BucketTree _tree; + private Offsets _treeOffsets; + private Buffer _entry; + private int _entryIndex; + private int _entrySetCount; private EntrySetHeader _entrySet; [StructLayout(LayoutKind.Explicit)] @@ -371,167 +861,109 @@ public partial class BucketTree } } - public Result Initialize(BucketTree tree) + public Visitor() { - Assert.SdkRequiresNotNull(tree); - Assert.SdkRequires(Tree == null || tree == Tree); - - if (Entry == null) - { - Entry = ArrayPool.Shared.Rent((int)tree.EntrySize); - Tree = tree; - EntryIndex = -1; - } - - return Result.Success; + _tree = null; + _entry = Buffer.Empty; + _treeOffsets = default; + _entryIndex = -1; + _entrySetCount = 0; + _entrySet = new EntrySetHeader(); } public void Dispose() { - if (Entry != null) + if (!_entry.IsNull) { - ArrayPool.Shared.Return(Entry); - Entry = null; + _tree.GetAllocator().Deallocate(ref _entry); + _tree = null; + _entry = Buffer.Empty; } } - public bool IsValid() => EntryIndex >= 0; - - public bool CanMoveNext() + /// + /// Returns a writable reference to this . + /// + /// This property allows using a expression with s + /// while still being able to pass it by reference. + /// A reference to this . 
+ public ref Visitor Ref { - return IsValid() && (EntryIndex + 1 < _entrySet.Info.Count || _entrySet.Info.Index + 1 < EntrySetCount); - } - - public bool CanMovePrevious() - { - return IsValid() && (EntryIndex > 0 || _entrySet.Info.Index > 0); - } - - public ref T Get() where T : unmanaged - { - return ref MemoryMarshal.Cast(Entry)[0]; - } - - public Result MoveNext() - { - Result rc; - - if (!IsValid()) - return ResultFs.OutOfRange.Log(); - - int entryIndex = EntryIndex + 1; - - // Invalidate our index, and read the header for the next index. - if (entryIndex == _entrySet.Info.Count) + get { - int entrySetIndex = _entrySet.Info.Index + 1; - if (entrySetIndex >= EntrySetCount) - return ResultFs.OutOfRange.Log(); - - EntryIndex = -1; - - long end = _entrySet.Info.End; - - long entrySetSize = Tree.NodeSize; - long entrySetOffset = entrySetIndex * entrySetSize; - - rc = Tree.EntryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); - if (rc.IsFailure()) return rc; - - rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, Tree.EntrySize); - if (rc.IsFailure()) return rc; - - if (_entrySet.Info.Start != end || _entrySet.Info.Start >= _entrySet.Info.End) - return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); - - entryIndex = 0; + InlineIL.IL.Emit.Ldarg_0(); + InlineIL.IL.Emit.Ret(); + throw InlineIL.IL.Unreachable(); } - else + } + + internal Result Initialize(BucketTree tree, in Offsets offsets) + { + Assert.SdkRequiresNotNull(tree); + Assert.SdkRequires(_tree == null || tree == _tree); + + if (_entry.IsNull) { - EntryIndex = 1; + _entry = tree.GetAllocator().Allocate(tree._entrySize, BufferAlignment); + if (_entry.IsNull) + return ResultFs.BufferAllocationFailed.Log(); + + _tree = tree; + _treeOffsets = offsets; } - // Read the new entry - long entrySize = Tree.EntrySize; - long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, Tree.NodeSize, entrySize, entryIndex); - - rc = Tree.EntryStorage.Read(entryOffset, Entry); - if (rc.IsFailure()) return rc; - - // Note that we changed index. - EntryIndex = entryIndex; return Result.Success; } - public Result MovePrevious() + public readonly bool IsValid() => _entryIndex >= 0; + + public readonly Offsets GetTreeOffsets() => _treeOffsets; + + public readonly bool CanMoveNext() { - Result rc; - - if (!IsValid()) - return ResultFs.OutOfRange.Log(); - - int entryIndex = EntryIndex; - - if (entryIndex == 0) - { - if (_entrySet.Info.Index <= 0) - return ResultFs.OutOfRange.Log(); - - EntryIndex = -1; - - long start = _entrySet.Info.Start; - - long entrySetSize = Tree.NodeSize; - int entrySetIndex = _entrySet.Info.Index - 1; - long entrySetOffset = entrySetIndex * entrySetSize; - - rc = Tree.EntryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); - if (rc.IsFailure()) return rc; - - rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, Tree.EntrySize); - if (rc.IsFailure()) return rc; - - if (_entrySet.Info.End != start || _entrySet.Info.Start >= _entrySet.Info.End) - return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); - - entryIndex = _entrySet.Info.Count; - } - else - { - EntryIndex = -1; - } - - // Read the new entry - long entrySize = Tree.EntrySize; - long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, Tree.NodeSize, entrySize, entryIndex); - - rc = Tree.EntryStorage.Read(entryOffset, Entry); - if (rc.IsFailure()) return rc; - - // Note that we changed index. 
- EntryIndex = entryIndex; - return Result.Success; + return IsValid() && (_entryIndex + 1 < _entrySet.Info.Count || _entrySet.Info.Index + 1 < _entrySetCount); } - public Result Find(long virtualAddress) + public readonly bool CanMovePrevious() { + return IsValid() && (_entryIndex > 0 || _entrySet.Info.Index > 0); + } + + public readonly ref readonly T Get() where T : unmanaged + { + Assert.SdkRequires(IsValid()); + + return ref MemoryMarshal.Cast(_entry.Span)[0]; + } + + internal Result Find(long virtualAddress) + { + Assert.SdkRequiresNotNull(_tree); + Result rc; - // Get the node. - BucketTreeNode node = Tree._nodeL1.GetNode(); + // Get the L1 node. + BucketTreeNode nodeL1 = _tree._nodeL1.GetNode(); - if (virtualAddress >= node.GetEndOffset()) + if (virtualAddress >= nodeL1.GetEndOffset()) return ResultFs.OutOfRange.Log(); int entrySetIndex; - if (Tree.IsExistOffsetL2OnL1() && virtualAddress < node.GetBeginOffset()) + if (_tree.IsExistOffsetL2OnL1() && virtualAddress < nodeL1.GetBeginOffset()) { // The portion of the L2 offsets containing our target offset is stored in the L1 node - ReadOnlySpan offsets = node.GetArray().Slice(node.GetCount()); + ReadOnlySpan offsets = nodeL1.GetArray().Slice(nodeL1.GetCount()); + + // Find the index of the entry containing the requested offset. + // If the value is not found, BinarySearch will return the bitwise complement of the + // index of the first element that is larger than the value. + // The offsets are the start offsets of each entry, so subtracting 1 from the index of + // the next-largest value will get us the index of the entry containing the offset. int index = offsets.BinarySearch(virtualAddress); if (index < 0) index = (~index) - 1; + // If the requested offset comes before the first offset in the list, "index" will be -1. if (index < 0) return ResultFs.OutOfRange.Log(); @@ -539,16 +971,16 @@ public partial class BucketTree } else { - ReadOnlySpan offsets = node.GetArray().Slice(0, node.GetCount()); + ReadOnlySpan offsets = nodeL1.GetArray().Slice(0, nodeL1.GetCount()); int index = offsets.BinarySearch(virtualAddress); if (index < 0) index = (~index) - 1; if (index < 0) return ResultFs.OutOfRange.Log(); - if (Tree.IsExistL2()) + if (_tree.IsExistL2()) { - if (index >= Tree.OffsetCount) + if (index >= _tree._offsetCount) return ResultFs.InvalidBucketTreeNodeOffset.Log(); rc = FindEntrySet(out entrySetIndex, virtualAddress, index); @@ -561,7 +993,7 @@ public partial class BucketTree } // Validate the entry set index. - if (entrySetIndex < 0 || entrySetIndex >= Tree.EntrySetCount) + if (entrySetIndex < 0 || entrySetIndex >= _tree._entrySetCount) return ResultFs.InvalidBucketTreeNodeOffset.Log(); // Find the entry. @@ -569,29 +1001,36 @@ public partial class BucketTree if (rc.IsFailure()) return rc; // Set count. 
- EntrySetCount = Tree.EntrySetCount; + _entrySetCount = _tree._entrySetCount; return Result.Success; } private Result FindEntrySet(out int entrySetIndex, long virtualAddress, int nodeIndex) { - long nodeSize = Tree.NodeSize; + long nodeSize = _tree._nodeSize; - using (var rented = new RentedArray((int)nodeSize)) + using var pool = new PooledBuffer((int)nodeSize, 1); + + if (nodeSize <= pool.GetSize()) { - return FindEntrySetWithBuffer(out entrySetIndex, virtualAddress, nodeIndex, rented.Span); + return FindEntrySetWithBuffer(out entrySetIndex, virtualAddress, nodeIndex, pool.GetBuffer()); + } + else + { + pool.Deallocate(); + return FindEntrySetWithoutBuffer(out entrySetIndex, virtualAddress, nodeIndex); } } - private Result FindEntrySetWithBuffer(out int outIndex, long virtualAddress, int nodeIndex, + private Result FindEntrySetWithBuffer(out int entrySetIndex, long virtualAddress, int nodeIndex, Span buffer) { - UnsafeHelpers.SkipParamInit(out outIndex); + UnsafeHelpers.SkipParamInit(out entrySetIndex); // Calculate node extents. - long nodeSize = Tree.NodeSize; + long nodeSize = _tree._nodeSize; long nodeOffset = (nodeIndex + 1) * nodeSize; - SubStorage storage = Tree.NodeStorage; + ref ValueSubStorage storage = ref _tree._nodeStorage; // Read the node. Result rc = storage.Read(nodeOffset, buffer.Slice(0, (int)nodeSize)); @@ -603,34 +1042,71 @@ public partial class BucketTree if (rc.IsFailure()) return rc; // Create the node and find. - var node = new StorageNode(sizeof(long), header.Count); + var node = new StorageNode(sizeof(long), header.EntryCount); node.Find(buffer, virtualAddress); if (node.GetIndex() < 0) return ResultFs.InvalidBucketTreeVirtualOffset.Log(); // Return the index. - outIndex = (int)Tree.GetEntrySetIndex(header.Index, node.GetIndex()); + entrySetIndex = _tree.GetEntrySetIndex(header.Index, node.GetIndex()); + return Result.Success; + } + + private Result FindEntrySetWithoutBuffer(out int outIndex, long virtualAddress, int nodeIndex) + { + UnsafeHelpers.SkipParamInit(out outIndex); + + // Calculate node extents. + long nodeSize = _tree._nodeSize; + long nodeOffset = nodeSize * (nodeIndex + 1); + ref ValueSubStorage storage = ref _tree._nodeStorage; + + // Read and validate the header. + Unsafe.SkipInit(out NodeHeader header); + Result rc = storage.Read(nodeOffset, SpanHelpers.AsByteSpan(ref header)); + if (rc.IsFailure()) return rc.Miss(); + + rc = header.Verify(nodeIndex, nodeSize, sizeof(long)); + if (rc.IsFailure()) return rc.Miss(); + + // Create the node, and find. + var node = new StorageNode(nodeOffset, sizeof(long), header.EntryCount); + rc = node.Find(in storage, virtualAddress); + if (rc.IsFailure()) return rc.Miss(); + + if (node.GetIndex() < 0) + return ResultFs.InvalidBucketTreeVirtualOffset.Log(); + + // Return the index. 
+ outIndex = _tree.GetEntrySetIndex(header.Index, node.GetIndex()); return Result.Success; } private Result FindEntry(long virtualAddress, int entrySetIndex) { - long entrySetSize = Tree.NodeSize; + long entrySetSize = _tree._nodeSize; - using (var rented = new RentedArray((int)entrySetSize)) + using var pool = new PooledBuffer((int)entrySetSize, 1); + + if (entrySetSize <= pool.GetSize()) { - return FindEntryWithBuffer(virtualAddress, entrySetIndex, rented.Span); + return FindEntryWithBuffer(virtualAddress, entrySetIndex, pool.GetBuffer()); + } + else + { + pool.Deallocate(); + return FindEntryWithoutBuffer(virtualAddress, entrySetIndex); } } private Result FindEntryWithBuffer(long virtualAddress, int entrySetIndex, Span buffer) { // Calculate entry set extents. - long entrySize = Tree.EntrySize; - long entrySetSize = Tree.NodeSize; + long entrySize = _tree._entrySize; + long entrySetSize = _tree._nodeSize; long entrySetOffset = entrySetIndex * entrySetSize; - SubStorage storage = Tree.EntryStorage; + ref ValueSubStorage storage = ref _tree._entryStorage; // Read the entry set. Result rc = storage.Read(entrySetOffset, buffer.Slice(0, (int)entrySetSize)); @@ -651,85 +1127,172 @@ public partial class BucketTree // Copy the data into entry. int entryIndex = node.GetIndex(); long entryOffset = GetBucketTreeEntryOffset(0, entrySize, entryIndex); - buffer.Slice((int)entryOffset, (int)entrySize).CopyTo(Entry); + buffer.Slice((int)entryOffset, (int)entrySize).CopyTo(_entry.Span); // Set our entry set/index. _entrySet = entrySet; - EntryIndex = entryIndex; + _entryIndex = entryIndex; return Result.Success; } - private struct StorageNode + private Result FindEntryWithoutBuffer(long virtualAddress, int entrySetIndex) { - private Offset _start; - private int _count; - private int _index; + // Calculate entry set extents. + long entrySize = _tree._entrySize; + long entrySetSize = _tree._nodeSize; + long entrySetOffset = entrySetSize * entrySetIndex; + ref ValueSubStorage storage = ref _tree._entryStorage; - public StorageNode(long size, int count) + // Read and validate the entry set. + Unsafe.SkipInit(out EntrySetHeader entrySet); + Result rc = storage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref entrySet)); + if (rc.IsFailure()) return rc.Miss(); + + rc = entrySet.Header.Verify(entrySetIndex, entrySetSize, entrySize); + if (rc.IsFailure()) return rc.Miss(); + + // Create the node, and find. + var node = new StorageNode(entrySetOffset, entrySize, entrySet.Info.Count); + rc = node.Find(in storage, virtualAddress); + if (rc.IsFailure()) return rc.Miss(); + + if (node.GetIndex() < 0) + return ResultFs.InvalidBucketTreeVirtualOffset.Log(); + + // Copy the data into entry. + _entryIndex = -1; + int entryIndex = node.GetIndex(); + long entryOffset = GetBucketTreeEntryOffset(entrySetOffset, entrySize, entryIndex); + + rc = storage.Read(entryOffset, _entry.Span); + if (rc.IsFailure()) return rc.Miss(); + + // Set our entry set/index. + _entrySet = entrySet; + _entryIndex = entryIndex; + + return Result.Success; + } + + public Result MoveNext() + { + Result rc; + + if (!IsValid()) + return ResultFs.OutOfRange.Log(); + + int entryIndex = _entryIndex + 1; + + // Invalidate our index, and read the header for the next index. 
+ if (entryIndex == _entrySet.Info.Count) { - _start = new Offset(NodeHeaderSize, (int)size); - _count = count; - _index = -1; + int entrySetIndex = _entrySet.Info.Index + 1; + if (entrySetIndex >= _entrySetCount) + return ResultFs.OutOfRange.Log(); + + _entryIndex = -1; + + long end = _entrySet.Info.End; + + long entrySetSize = _tree._nodeSize; + long entrySetOffset = entrySetIndex * entrySetSize; + + rc = _tree._entryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); + if (rc.IsFailure()) return rc; + + rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, _tree._entrySize); + if (rc.IsFailure()) return rc; + + if (_entrySet.Info.Start != end || _entrySet.Info.Start >= _entrySet.Info.End) + return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); + + entryIndex = 0; + } + else + { + _entryIndex = 1; } - public int GetIndex() => _index; + // Read the new entry + long entrySize = _tree._entrySize; + long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, _tree._nodeSize, entrySize, entryIndex); - public void Find(ReadOnlySpan buffer, long virtualAddress) + rc = _tree._entryStorage.Read(entryOffset, _entry.Span); + if (rc.IsFailure()) return rc; + + // Note that we changed index. + _entryIndex = entryIndex; + return Result.Success; + } + + public Result MovePrevious() + { + Result rc; + + if (!IsValid()) + return ResultFs.OutOfRange.Log(); + + int entryIndex = _entryIndex; + + if (entryIndex == 0) { - int end = _count; - Offset pos = _start; + if (_entrySet.Info.Index <= 0) + return ResultFs.OutOfRange.Log(); - while (end > 0) - { - int half = end / 2; - Offset mid = pos + half; + _entryIndex = -1; - long offset = BinaryPrimitives.ReadInt64LittleEndian(buffer.Slice((int)mid.Get())); + long start = _entrySet.Info.Start; - if (offset <= virtualAddress) - { - pos = mid + 1; - end -= half + 1; - } - else - { - end = half; - } - } + long entrySetSize = _tree._nodeSize; + int entrySetIndex = _entrySet.Info.Index - 1; + long entrySetOffset = entrySetIndex * entrySetSize; - _index = (int)(pos - _start) - 1; + rc = _tree._entryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); + if (rc.IsFailure()) return rc; + + rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, _tree._entrySize); + if (rc.IsFailure()) return rc; + + if (_entrySet.Info.End != start || _entrySet.Info.Start >= _entrySet.Info.End) + return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); + + entryIndex = _entrySet.Info.Count; + } + else + { + _entryIndex = -1; } - private readonly struct Offset + entryIndex--; + + // Read the new entry + long entrySize = _tree._entrySize; + long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, _tree._nodeSize, entrySize, entryIndex); + + rc = _tree._entryStorage.Read(entryOffset, _entry.Span); + if (rc.IsFailure()) return rc; + + // Note that we changed index. 
+ _entryIndex = entryIndex; + return Result.Success; + } + + public readonly Result ScanContinuousReading(out ContinuousReadingInfo info, long offset, long size) + where TEntry : unmanaged, IContinuousReadingEntry + { + var param = new ContinuousReadingParam { - private readonly long _offset; - private readonly int _stride; + Offset = offset, + Size = size, + EntrySet = _entrySet.Header, + EntryIndex = _entryIndex, + TreeOffsets = _treeOffsets + }; - public Offset(long offset, int stride) - { - _offset = offset; - _stride = stride; - } + _entry.Span.CopyTo(SpanHelpers.AsByteSpan(ref param.Entry)); - public long Get() => _offset; - - public static Offset operator ++(Offset left) => left + 1; - public static Offset operator --(Offset left) => left - 1; - - public static Offset operator +(Offset left, long right) => new Offset(left._offset + right * left._stride, left._stride); - public static Offset operator -(Offset left, long right) => new Offset(left._offset - right * left._stride, left._stride); - - public static long operator -(Offset left, Offset right) => - (left._offset - right._offset) / left._stride; - - public static bool operator ==(Offset left, Offset right) => left._offset == right._offset; - public static bool operator !=(Offset left, Offset right) => left._offset != right._offset; - - public bool Equals(Offset other) => _offset == other._offset; - public override bool Equals(object obj) => obj is Offset other && Equals(other); - public override int GetHashCode() => _offset.GetHashCode(); - } + return _tree.ScanContinuousReading(out info, in param); } } } diff --git a/src/LibHac/FsSystem/BucketTree2.cs b/src/LibHac/FsSystem/BucketTree2.cs deleted file mode 100644 index 1d36e7fa..00000000 --- a/src/LibHac/FsSystem/BucketTree2.cs +++ /dev/null @@ -1,1298 +0,0 @@ -using System; -using System.Buffers.Binary; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; -using LibHac.Common; -using LibHac.Diag; -using LibHac.Fs; -using LibHac.Os; -using LibHac.Util; -using Buffer = LibHac.Mem.Buffer; - -namespace LibHac.FsSystem; - -/// -/// Allows searching and iterating the entries in a bucket tree data structure. 
-/// -/// Based on FS 13.1.0 (nnSdk 13.4.0) -public partial class BucketTree2 : IDisposable -{ - private const uint ExpectedMagic = 0x52544B42; // BKTR - private const int MaxVersion = 1; - - private const int NodeSizeMin = 1024; - private const int NodeSizeMax = 1024 * 512; - - private static readonly int BufferAlignment = sizeof(long); - - private static int NodeHeaderSize => Unsafe.SizeOf(); - - private ValueSubStorage _nodeStorage; - private ValueSubStorage _entryStorage; - - private NodeBuffer _nodeL1; - - private long _nodeSize; - private long _entrySize; - private int _entryCount; - private int _offsetCount; - private int _entrySetCount; - private OffsetCache _offsetCache; - - public struct ContinuousReadingInfo - { - private long _readSize; - private int _skipCount; - private bool _isDone; - - public readonly bool CanDo() => _readSize != 0; - public bool CheckNeedScan() => --_skipCount <= 0; - public readonly bool IsDone() => _isDone; - - public void Done() - { - _readSize = 0; - _isDone = true; - } - - public readonly long GetReadSize() => _readSize; - public void SetReadSize(long readSize) => _readSize = readSize; - - public void Reset() - { - _readSize = 0; - _skipCount = 0; - _isDone = false; - } - - public void SetSkipCount(int count) - { - Assert.SdkRequiresGreaterEqual(count, 0); - - _skipCount = count; - } - } - - public interface IContinuousReadingEntry - { - int FragmentSizeMax { get; } - - long GetVirtualOffset(); - long GetPhysicalOffset(); - bool IsFragment(); - } - - private struct ContinuousReadingParam where TEntry : unmanaged, IContinuousReadingEntry - { - public long Offset; - public long Size; - public NodeHeader EntrySet; - public int EntryIndex; - public Offsets TreeOffsets; - public TEntry Entry; - } - - public struct NodeHeader - { - public int Index; - public int EntryCount; - public long OffsetEnd; - - public Result Verify(int nodeIndex, long nodeSize, long entrySize) - { - if (Index != nodeIndex) - return ResultFs.InvalidBucketTreeNodeIndex.Log(); - - if (entrySize == 0 || nodeSize < entrySize + NodeHeaderSize) - return ResultFs.InvalidSize.Log(); - - long maxEntryCount = (nodeSize - NodeHeaderSize) / entrySize; - - if (EntryCount <= 0 || maxEntryCount < EntryCount) - return ResultFs.InvalidBucketTreeNodeEntryCount.Log(); - - if (OffsetEnd < 0) - return ResultFs.InvalidBucketTreeNodeOffset.Log(); - - return Result.Success; - } - } - - [NonCopyable] - private struct NodeBuffer : IDisposable - { - private MemoryResource _allocator; - private Buffer _header; - - public void Dispose() - { - Assert.SdkAssert(_header.IsNull); - } - - public readonly MemoryResource GetAllocator() => _allocator; - - public bool Allocate(MemoryResource allocator, int nodeSize) - { - Assert.SdkRequires(_header.IsNull); - - _allocator = allocator; - _header = allocator.Allocate(nodeSize, BufferAlignment); - - return !_header.IsNull; - } - - public void Free() - { - if (!_header.IsNull) - { - _allocator.Deallocate(ref _header, BufferAlignment); - _header = Buffer.Empty; - } - - _allocator = null; - } - - public void FillZero() - { - if (!_header.IsNull) - { - _header.Span.Clear(); - } - } - - public readonly ref NodeHeader GetHeader() - { - Assert.SdkRequiresGreaterEqual(_header.Length * sizeof(long), Unsafe.SizeOf()); - - return ref Unsafe.As(ref _header.Span[0]); - } - - public readonly Span GetBuffer() - { - return _header.Span; - } - - public readonly BucketTreeNode GetNode() where TEntry : unmanaged - { - return new BucketTreeNode(GetBuffer()); - } - } - - private struct 
StorageNode - { - private Offset _start; - private int _count; - private int _index; - - public StorageNode(long offset, long size, int count) - { - _start = new Offset(offset + NodeHeaderSize, (int)size); - _count = count; - _index = -1; - } - - public StorageNode(long size, int count) - { - _start = new Offset(NodeHeaderSize, (int)size); - _count = count; - _index = -1; - } - - public readonly int GetIndex() => _index; - - public void Find(ReadOnlySpan buffer, long virtualAddress) - { - int end = _count; - Offset pos = _start; - - while (end > 0) - { - int half = end / 2; - Offset mid = pos + half; - - long offset = BinaryPrimitives.ReadInt64LittleEndian(buffer.Slice((int)mid.Get())); - - if (offset <= virtualAddress) - { - pos = mid + 1; - end -= half + 1; - } - else - { - end = half; - } - } - - _index = (int)(pos - _start) - 1; - } - - public Result Find(in ValueSubStorage storage, long virtualAddress) - { - int end = _count; - Offset pos = _start; - - while (end > 0) - { - int half = end / 2; - Offset mid = pos + half; - - long offset = 0; - Result rc = storage.Read(mid.Get(), SpanHelpers.AsByteSpan(ref offset)); - if (rc.IsFailure()) return rc.Miss(); - - if (offset <= virtualAddress) - { - pos = mid + 1; - end -= half + 1; - } - else - { - end = half; - } - } - - _index = (int)(pos - _start) - 1; - return Result.Success; - } - - private readonly struct Offset - { - private readonly long _offset; - private readonly int _stride; - - public Offset(long offset, int stride) - { - _offset = offset; - _stride = stride; - } - - public long Get() => _offset; - - public static Offset operator ++(Offset left) => left + 1; - public static Offset operator --(Offset left) => left - 1; - - public static Offset operator +(Offset left, long right) => new Offset(left._offset + right * left._stride, left._stride); - public static Offset operator -(Offset left, long right) => new Offset(left._offset - right * left._stride, left._stride); - - public static long operator -(Offset left, Offset right) => - (left._offset - right._offset) / left._stride; - - public static bool operator ==(Offset left, Offset right) => left._offset == right._offset; - public static bool operator !=(Offset left, Offset right) => left._offset != right._offset; - - public bool Equals(Offset other) => _offset == other._offset; - public override bool Equals(object obj) => obj is Offset other && Equals(other); - public override int GetHashCode() => _offset.GetHashCode(); - } - } - - private struct OffsetCache - { - public OffsetCache() - { - Mutex = new SdkMutexType(); - Mutex.Initialize(); - IsInitialized = false; - Offsets.StartOffset = -1; - Offsets.EndOffset = -1; - } - - public bool IsInitialized; - public Offsets Offsets; - public SdkMutexType Mutex; - } - - public struct Offsets - { - public long StartOffset; - public long EndOffset; - - public readonly bool IsInclude(long offset) - { - return StartOffset <= offset && offset < EndOffset; - } - - public readonly bool IsInclude(long offset, long size) - { - return size > 0 && StartOffset <= offset && size <= EndOffset - offset; - } - } - - public struct Header - { - public uint Magic; - public uint Version; - public int EntryCount; -#pragma warning disable 414 - private int _reserved; -#pragma warning restore 414 - - public void Format(int entryCount) - { - Assert.SdkRequiresLessEqual(0, entryCount); - - Magic = ExpectedMagic; - Version = MaxVersion; - EntryCount = entryCount; - _reserved = 0; - } - - public readonly Result Verify() - { - if (Magic != ExpectedMagic) - return 
ResultFs.InvalidBucketTreeSignature.Log(); - - if (EntryCount < 0) - return ResultFs.InvalidBucketTreeEntryCount.Log(); - - if (Version > MaxVersion) - return ResultFs.UnsupportedVersion.Log(); - - return Result.Success; - } - } - - public readonly ref struct BucketTreeNode where TEntry : unmanaged - { - private readonly Span _buffer; - - public BucketTreeNode(Span buffer) - { - _buffer = buffer; - - Assert.SdkRequiresGreaterEqual(_buffer.Length, Unsafe.SizeOf()); - Assert.SdkRequiresGreaterEqual(_buffer.Length, - Unsafe.SizeOf() + GetHeader().EntryCount * Unsafe.SizeOf()); - } - - public int GetCount() => GetHeader().EntryCount; - - public ReadOnlySpan GetArray() => GetWritableArray(); - internal Span GetWritableArray() => GetWritableArray(); - - public long GetBeginOffset() => GetArray()[0]; - public long GetEndOffset() => GetHeader().OffsetEnd; - public long GetL2BeginOffset() => GetArray()[GetCount()]; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ReadOnlySpan GetArray() where TElement : unmanaged - { - return GetWritableArray(); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private Span GetWritableArray() where TElement : unmanaged - { - return MemoryMarshal.Cast(_buffer.Slice(Unsafe.SizeOf())); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal ref NodeHeader GetHeader() - { - return ref Unsafe.As(ref MemoryMarshal.GetReference(_buffer)); - } - } - - private static int GetEntryCount(long nodeSize, long entrySize) - { - return (int)((nodeSize - Unsafe.SizeOf()) / entrySize); - } - - private static int GetOffsetCount(long nodeSize) - { - return (int)((nodeSize - Unsafe.SizeOf()) / sizeof(long)); - } - - private static int GetEntrySetCount(long nodeSize, long entrySize, int entryCount) - { - int entryCountPerNode = GetEntryCount(nodeSize, entrySize); - return BitUtil.DivideUp(entryCount, entryCountPerNode); - } - - private static int GetNodeL2Count(long nodeSize, long entrySize, int entryCount) - { - int offsetCountPerNode = GetOffsetCount(nodeSize); - int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount); - - if (entrySetCount <= offsetCountPerNode) - return 0; - - int nodeL2Count = BitUtil.DivideUp(entrySetCount, offsetCountPerNode); - Abort.DoAbortUnless(nodeL2Count <= offsetCountPerNode); - - return BitUtil.DivideUp(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1)), offsetCountPerNode); - } - - public static int QueryHeaderStorageSize() => Unsafe.SizeOf
(); - - public static long QueryNodeStorageSize(long nodeSize, long entrySize, int entryCount) - { - Assert.SdkRequiresLessEqual(sizeof(long), entrySize); - Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); - Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); - Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - Assert.SdkRequiresLessEqual(0, entryCount); - - if (entryCount <= 0) - return 0; - - return (1 + GetNodeL2Count(nodeSize, entrySize, entryCount)) * nodeSize; - } - - public static long QueryEntryStorageSize(long nodeSize, long entrySize, int entryCount) - { - Assert.SdkRequiresLessEqual(sizeof(long), entrySize); - Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); - Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); - Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - Assert.SdkRequiresLessEqual(0, entryCount); - - if (entryCount <= 0) - return 0; - - return GetEntrySetCount(nodeSize, entrySize, entryCount) * nodeSize; - } - - private static long GetBucketTreeEntryOffset(long entrySetOffset, long entrySize, int entryIndex) - { - return entrySetOffset + Unsafe.SizeOf() + entryIndex * entrySize; - } - - private static long GetBucketTreeEntryOffset(int entrySetIndex, long nodeSize, long entrySize, int entryIndex) - { - return GetBucketTreeEntryOffset(entrySetIndex * nodeSize, entrySize, entryIndex); - } - - public BucketTree2() - { - _offsetCache = new OffsetCache(); - } - - public void Dispose() - { - FinalizeObject(); - _nodeL1.Dispose(); - } - - public MemoryResource GetAllocator() => _nodeL1.GetAllocator(); - - public int GetEntryCount() => _entryCount; - - public Result GetOffsets(out Offsets offsets) - { - UnsafeHelpers.SkipParamInit(out offsets); - - Result rc = EnsureOffsetCache(); - if (rc.IsFailure()) return rc.Miss(); - - offsets = _offsetCache.Offsets; - return Result.Success; - } - - public Result Initialize(MemoryResource allocator, in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, - int nodeSize, int entrySize, int entryCount) - { - Assert.SdkRequiresNotNull(allocator); - Assert.SdkRequiresLessEqual(sizeof(long), entrySize); - Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); - Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); - Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - Assert.SdkRequires(!IsInitialized()); - - // Ensure valid entry count. - if (entryCount <= 0) - return ResultFs.InvalidArgument.Log(); - - // Allocate node. - if (!_nodeL1.Allocate(allocator, nodeSize)) - return ResultFs.BufferAllocationFailed.Log(); - - bool needFree = true; - try - { - // Read node. - Result rc = nodeStorage.Read(0, _nodeL1.GetBuffer()); - if (rc.IsFailure()) return rc; - - // Verify node. - rc = _nodeL1.GetHeader().Verify(0, nodeSize, sizeof(long)); - if (rc.IsFailure()) return rc; - - // Validate offsets. 
- int offsetCount = GetOffsetCount(nodeSize); - int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount); - BucketTreeNode node = _nodeL1.GetNode(); - - long startOffset; - if (offsetCount < entrySetCount && node.GetCount() < offsetCount) - { - startOffset = node.GetL2BeginOffset(); - } - else - { - startOffset = node.GetBeginOffset(); - } - - long endOffset = node.GetEndOffset(); - - if (startOffset < 0 || startOffset > node.GetBeginOffset() || startOffset >= endOffset) - return ResultFs.InvalidBucketTreeEntryOffset.Log(); - - _nodeStorage.Set(in nodeStorage); - _entryStorage.Set(in entryStorage); - _nodeSize = nodeSize; - _entrySize = entrySize; - _offsetCount = offsetCount; - _entrySetCount = entrySetCount; - _offsetCache.IsInitialized = true; - _offsetCache.Offsets.StartOffset = startOffset; - _offsetCache.Offsets.EndOffset = endOffset; - - needFree = false; - - return Result.Success; - } - finally - { - if (needFree) - _nodeL1.Free(); - } - } - - public void Initialize(long nodeSize, long endOffset) - { - Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); - Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - Assert.SdkRequiresLessEqual(0, endOffset); - Assert.SdkRequires(!IsInitialized()); - - _nodeSize = nodeSize; - _offsetCache.IsInitialized = true; - _offsetCache.Offsets.StartOffset = 0; - _offsetCache.Offsets.EndOffset = endOffset; - } - - public void FinalizeObject() - { - if (IsInitialized()) - { - _nodeStorage.Dispose(); - _entryStorage.Dispose(); - - _nodeL1.Free(); - - _nodeSize = 0; - _entrySize = 0; - _entryCount = 0; - _offsetCount = 0; - _entrySetCount = 0; - _offsetCache.IsInitialized = false; - _offsetCache.Offsets = default; - } - } - - public bool IsInitialized() => _nodeSize > 0; - public bool IsEmpty() => _entrySize == 0; - - public Result Find(ref Visitor visitor, long virtualAddress) - { - Assert.SdkRequires(IsInitialized()); - - if (virtualAddress < 0) - return ResultFs.InvalidOffset.Log(); - - if (IsEmpty()) - return ResultFs.OutOfRange.Log(); - - Result rc = GetOffsets(out Offsets offsets); - if (rc.IsFailure()) return rc.Miss(); - - rc = visitor.Initialize(this, in offsets); - if (rc.IsFailure()) return rc.Miss(); - - return visitor.Find(virtualAddress); - } - - public Result InvalidateCache() - { - Result rc = _nodeStorage.OperateRange(OperationId.InvalidateCache, 0, long.MaxValue); - if (rc.IsFailure()) return rc.Miss(); - - rc = _entryStorage.OperateRange(OperationId.InvalidateCache, 0, long.MaxValue); - if (rc.IsFailure()) return rc.Miss(); - - _offsetCache.IsInitialized = false; - return Result.Success; - } - - private Result EnsureOffsetCache() - { - if (_offsetCache.IsInitialized) - return Result.Success; - - using ScopedLock lk = ScopedLock.Lock(ref _offsetCache.Mutex); - - if (_offsetCache.IsInitialized) - return Result.Success; - - Result rc = _nodeStorage.Read(0, _nodeL1.GetBuffer()); - if (rc.IsFailure()) return rc.Miss(); - - rc = _nodeL1.GetHeader().Verify(0, _nodeSize, sizeof(long)); - if (rc.IsFailure()) return rc.Miss(); - - BucketTreeNode node = _nodeL1.GetNode(); - - long startOffset; - if (_offsetCount < _entrySetCount && node.GetCount() < _offsetCount) - { - startOffset = node.GetL2BeginOffset(); - } - else - { - startOffset = node.GetBeginOffset(); - } - - if (startOffset < 0 || startOffset > node.GetBeginOffset()) - return ResultFs.InvalidBucketTreeEntryOffset.Log(); - - long endOffset = node.GetEndOffset(); - - if (startOffset >= endOffset) - return ResultFs.InvalidBucketTreeEntryOffset.Log(); - - 
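The initialization and offset-cache logic above leans on the node/entry-set arithmetic (offsets per node, entries per entry set, whether an L2 level exists). The following standalone sketch re-derives that math under stated assumptions: a 16-byte node header, 8-byte offsets, and a simplified L2 count that ignores the refinement where some L2 offsets are stored directly in the L1 node, so it can overestimate by one node. All names here are illustrative, not part of the library.

    using System;

    internal static class BucketTreeLayoutSketch
    {
        // Assumes the 16-byte node header and 8-byte offsets used by the tree above.
        private const int NodeHeaderSize = 16;

        private static int DivideUp(int value, int divisor) => (value + divisor - 1) / divisor;

        private static int EntriesPerEntrySet(int nodeSize, int entrySize) => (nodeSize - NodeHeaderSize) / entrySize;

        private static int OffsetsPerNode(int nodeSize) => (nodeSize - NodeHeaderSize) / sizeof(long);

        public static void Describe(int nodeSize, int entrySize, int entryCount)
        {
            int entrySetCount = DivideUp(entryCount, EntriesPerEntrySet(nodeSize, entrySize));
            int offsetsPerNode = OffsetsPerNode(nodeSize);

            // An L2 level is only needed when one L1 node cannot hold an offset per entry set.
            bool needsL2 = entrySetCount > offsetsPerNode;
            int l2NodeCount = needsL2 ? DivideUp(entrySetCount, offsetsPerNode) : 0;

            // Entry storage is one node-sized block per entry set; node storage is the L1 node plus any L2 nodes.
            long entryStorageSize = (long)entrySetCount * nodeSize;
            long nodeStorageSize = (1L + l2NodeCount) * nodeSize;

            Console.WriteLine($"{entrySetCount} entry sets, {l2NodeCount} L2 nodes");
            Console.WriteLine($"node storage {nodeStorageSize:N0} B, entry storage {entryStorageSize:N0} B");
        }
    }

For example, with 16 KiB nodes and 20-byte entries, Describe(16 * 1024, 20, 100_000) reports 123 entry sets and no L2 level, since a single L1 node holds 2046 offsets.
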
_offsetCache.IsInitialized = true; - _offsetCache.Offsets.StartOffset = startOffset; - _offsetCache.Offsets.EndOffset = endOffset; - - return Result.Success; - } - - private bool IsExistL2() => _offsetCount < _entrySetCount; - private bool IsExistOffsetL2OnL1() => IsExistL2() && _nodeL1.GetHeader().EntryCount < _offsetCount; - - private int GetEntrySetIndex(int nodeIndex, int offsetIndex) - { - return (_offsetCount - _nodeL1.GetHeader().EntryCount) + (_offsetCount * nodeIndex) + offsetIndex; - } - - private Result ScanContinuousReading(out ContinuousReadingInfo info, - in ContinuousReadingParam param) where TEntry : unmanaged, IContinuousReadingEntry - { - Assert.SdkRequires(IsInitialized()); - Assert.Equal(Unsafe.SizeOf(), _entrySize); - - info = new ContinuousReadingInfo(); - - // If there's nothing to read, we're done. - if (param.Size == 0) - return Result.Success; - - // If we're reading a fragment, we're done. - // IsFragment() is a readonly function, but we can't specify that on interfaces - // so cast the readonly params to non-readonly - if (Unsafe.AsRef(in param.Entry).IsFragment()) - return Result.Success; - - // Validate the first entry. - TEntry entry = param.Entry; - long currentOffset = param.Offset; - - if (entry.GetVirtualOffset() > currentOffset) - return ResultFs.OutOfRange.Log(); - - // Create a pooled buffer for our scan. - var pool = new PooledBuffer((int)_nodeSize, 1); - var buffer = Span.Empty; - - Result rc = _entryStorage.GetSize(out long entryStorageSize); - if (rc.IsFailure()) return rc.Miss(); - - // Read the node. - if (_nodeSize <= pool.GetSize()) - { - buffer = pool.GetBuffer(); - long ofs = param.EntrySet.Index * _nodeSize; - - if (_nodeSize + ofs > entryStorageSize) - return ResultFs.InvalidBucketTreeNodeEntryCount.Log(); - - rc = _entryStorage.Read(ofs, buffer.Slice(0, (int)_nodeSize)); - if (rc.IsFailure()) return rc.Miss(); - } - - // Calculate extents. - long endOffset = param.Size + currentOffset; - long physicalOffset = entry.GetPhysicalOffset(); - - // Start merge tracking. - long mergeSize = 0; - long readableSize = 0; - bool merged = false; - - // Iterate. - int entryIndex = param.EntryIndex; - int entryCount = param.EntrySet.EntryCount; - - while (entryIndex < entryCount) - { - // If we're past the end, we're done. - if (endOffset <= currentOffset) - break; - - // Validate the entry offset. - long entryOffset = entry.GetVirtualOffset(); - if (entryOffset > currentOffset) - return ResultFs.InvalidIndirectEntryOffset.Log(); - - // Get the next entry. - TEntry nextEntry = default; - long nextEntryOffset; - - if (entryIndex + 1 < entryCount) - { - if (buffer.IsEmpty) - { - long ofs = GetBucketTreeEntryOffset(param.EntrySet.Index, _nodeSize, _entrySize, entryIndex + 1); - - if (_entrySize + ofs > entryStorageSize) - return ResultFs.InvalidBucketTreeEntryOffset.Log(); - - rc = _entryStorage.Read(ofs, SpanHelpers.AsByteSpan(ref nextEntry)); - if (rc.IsFailure()) return rc.Miss(); - } - else - { - long ofs = GetBucketTreeEntryOffset(0, _entrySize, entryIndex + 1); - buffer.Slice((int)ofs, (int)_entrySize).CopyTo(SpanHelpers.AsByteSpan(ref nextEntry)); - } - - nextEntryOffset = nextEntry.GetVirtualOffset(); - - if (!param.TreeOffsets.IsInclude(nextEntryOffset)) - return ResultFs.InvalidIndirectEntryOffset.Log(); - } - else - { - nextEntryOffset = param.EntrySet.OffsetEnd; - } - - // Validate the next entry offset. - if (currentOffset >= nextEntryOffset) - return ResultFs.InvalidIndirectEntryOffset.Log(); - - // Determine the much data there is. 
- long dataSize = nextEntryOffset - currentOffset; - Assert.SdkLess(0, dataSize); - - // Determine how much data we should read. - long remainingSize = endOffset - currentOffset; - long readSize = Math.Min(remainingSize, dataSize); - Assert.SdkLessEqual(readSize, param.Size); - - // Update our merge tracking. - if (entry.IsFragment()) - { - // If we can't merge, stop looping. - if (readSize >= entry.FragmentSizeMax || remainingSize <= dataSize) - break; - - // Otherwise, add the current size to the merge size. - mergeSize += readSize; - } - else - { - // If we can't merge, stop looping. - if (physicalOffset != entry.GetPhysicalOffset()) - break; - - // Add the size to the readable amount. - readableSize += readSize + mergeSize; - Assert.SdkLessEqual(readableSize, param.Size); - - // Update whether we've merged. - merged |= mergeSize > 0; - mergeSize = 0; - } - - // Advance. - currentOffset += readSize; - Assert.SdkLessEqual(currentOffset, endOffset); - - physicalOffset += nextEntryOffset - entryOffset; - entry = nextEntry; - entryIndex++; - } - - // If we merged, set our readable size. - if (merged) - { - info.SetReadSize(readableSize); - } - - info.SetSkipCount(entryIndex - param.EntryIndex); - - return Result.Success; - } - - public ref struct Visitor - { - private BucketTree2 _tree; - private Offsets _treeOffsets; - private Buffer _entry; - private int _entryIndex; - private int _entrySetCount; - private EntrySetHeader _entrySet; - - [StructLayout(LayoutKind.Explicit)] - private struct EntrySetHeader - { - // ReSharper disable once MemberHidesStaticFromOuterClass - [FieldOffset(0)] public NodeHeader Header; - [FieldOffset(0)] public EntrySetInfo Info; - - [StructLayout(LayoutKind.Sequential)] - public struct EntrySetInfo - { - public int Index; - public int Count; - public long End; - public long Start; - } - } - - public Visitor() - { - _tree = null; - _entry = Buffer.Empty; - _treeOffsets = default; - _entryIndex = -1; - _entrySetCount = 0; - _entrySet = new EntrySetHeader(); - } - - public void Dispose() - { - if (!_entry.IsNull) - { - _tree.GetAllocator().Deallocate(ref _entry); - _tree = null; - _entry = Buffer.Empty; - } - } - - /// - /// Returns a writable reference to this . - /// - /// This property allows using a expression with s - /// while still being able to pass it by reference. - /// A reference to this . 
- public ref Visitor Ref - { - get - { - InlineIL.IL.Emit.Ldarg_0(); - InlineIL.IL.Emit.Ret(); - throw InlineIL.IL.Unreachable(); - } - } - - internal Result Initialize(BucketTree2 tree, in Offsets offsets) - { - Assert.SdkRequiresNotNull(tree); - Assert.SdkRequires(_tree == null || tree == _tree); - - if (_entry.IsNull) - { - _entry = tree.GetAllocator().Allocate(tree._entrySize, BufferAlignment); - if (_entry.IsNull) - return ResultFs.BufferAllocationFailed.Log(); - - _tree = tree; - _treeOffsets = offsets; - } - - return Result.Success; - } - - public readonly bool IsValid() => _entryIndex >= 0; - - public readonly Offsets GetTreeOffsets() => _treeOffsets; - - public readonly bool CanMoveNext() - { - return IsValid() && (_entryIndex + 1 < _entrySet.Info.Count || _entrySet.Info.Index + 1 < _entrySetCount); - } - - public readonly bool CanMovePrevious() - { - return IsValid() && (_entryIndex > 0 || _entrySet.Info.Index > 0); - } - - public readonly ref readonly T Get() where T : unmanaged - { - Assert.SdkRequires(IsValid()); - - return ref MemoryMarshal.Cast(_entry.Span)[0]; - } - - internal Result Find(long virtualAddress) - { - Assert.SdkRequiresNotNull(_tree); - - Result rc; - - // Get the L1 node. - BucketTreeNode nodeL1 = _tree._nodeL1.GetNode(); - - if (virtualAddress >= nodeL1.GetEndOffset()) - return ResultFs.OutOfRange.Log(); - - int entrySetIndex; - - if (_tree.IsExistOffsetL2OnL1() && virtualAddress < nodeL1.GetBeginOffset()) - { - // The portion of the L2 offsets containing our target offset is stored in the L1 node - ReadOnlySpan offsets = nodeL1.GetArray().Slice(nodeL1.GetCount()); - - // Find the index of the entry containing the requested offset. - // If the value is not found, BinarySearch will return the bitwise complement of the - // index of the first element that is larger than the value. - // The offsets are the start offsets of each entry, so subtracting 1 from the index of - // the next-largest value will get us the index of the entry containing the offset. - int index = offsets.BinarySearch(virtualAddress); - if (index < 0) index = (~index) - 1; - - // If the requested offset comes before the first offset in the list, "index" will be -1. - if (index < 0) - return ResultFs.OutOfRange.Log(); - - entrySetIndex = index; - } - else - { - ReadOnlySpan offsets = nodeL1.GetArray().Slice(0, nodeL1.GetCount()); - int index = offsets.BinarySearch(virtualAddress); - if (index < 0) index = (~index) - 1; - - if (index < 0) - return ResultFs.OutOfRange.Log(); - - if (_tree.IsExistL2()) - { - if (index >= _tree._offsetCount) - return ResultFs.InvalidBucketTreeNodeOffset.Log(); - - rc = FindEntrySet(out entrySetIndex, virtualAddress, index); - if (rc.IsFailure()) return rc; - } - else - { - entrySetIndex = index; - } - } - - // Validate the entry set index. - if (entrySetIndex < 0 || entrySetIndex >= _tree._entrySetCount) - return ResultFs.InvalidBucketTreeNodeOffset.Log(); - - // Find the entry. - rc = FindEntry(virtualAddress, entrySetIndex); - if (rc.IsFailure()) return rc; - - // Set count. 
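The comments in Find above describe the BinarySearch bitwise-complement trick used to locate the entry set containing a virtual offset. A minimal, self-contained illustration of that lookup (names are illustrative only):

    using System;

    internal static class OffsetLookupSketch
    {
        // Returns the index of the entry whose start offset is the largest value <= virtualAddress,
        // or -1 if virtualAddress comes before the first entry.
        public static int FindContainingEntry(ReadOnlySpan<long> startOffsets, long virtualAddress)
        {
            int index = startOffsets.BinarySearch(virtualAddress);

            // On a miss, BinarySearch returns the bitwise complement of the index of the first
            // element larger than the value; the element before that one contains the offset.
            if (index < 0)
                index = ~index - 1;

            return index;
        }

        public static void Main()
        {
            ReadOnlySpan<long> offsets = stackalloc long[] { 0, 0x4000, 0x9000, 0x20000 };

            Console.WriteLine(FindContainingEntry(offsets, 0x4000)); // 1 (exact hit)
            Console.WriteLine(FindContainingEntry(offsets, 0x8FFF)); // 1 (inside the second entry)
            Console.WriteLine(FindContainingEntry(offsets, 0x9000)); // 2
            Console.WriteLine(FindContainingEntry(offsets, -5));     // -1 (before the tree)
        }
    }
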
- _entrySetCount = _tree._entrySetCount; - return Result.Success; - } - - private Result FindEntrySet(out int entrySetIndex, long virtualAddress, int nodeIndex) - { - long nodeSize = _tree._nodeSize; - - using var pool = new PooledBuffer((int)nodeSize, 1); - - if (nodeSize <= pool.GetSize()) - { - return FindEntrySetWithBuffer(out entrySetIndex, virtualAddress, nodeIndex, pool.GetBuffer()); - } - else - { - pool.Deallocate(); - return FindEntrySetWithoutBuffer(out entrySetIndex, virtualAddress, nodeIndex); - } - } - - private Result FindEntrySetWithBuffer(out int entrySetIndex, long virtualAddress, int nodeIndex, - Span buffer) - { - UnsafeHelpers.SkipParamInit(out entrySetIndex); - - // Calculate node extents. - long nodeSize = _tree._nodeSize; - long nodeOffset = (nodeIndex + 1) * nodeSize; - ref ValueSubStorage storage = ref _tree._nodeStorage; - - // Read the node. - Result rc = storage.Read(nodeOffset, buffer.Slice(0, (int)nodeSize)); - if (rc.IsFailure()) return rc; - - // Validate the header. - NodeHeader header = MemoryMarshal.Cast(buffer)[0]; - rc = header.Verify(nodeIndex, nodeSize, sizeof(long)); - if (rc.IsFailure()) return rc; - - // Create the node and find. - var node = new StorageNode(sizeof(long), header.EntryCount); - node.Find(buffer, virtualAddress); - - if (node.GetIndex() < 0) - return ResultFs.InvalidBucketTreeVirtualOffset.Log(); - - // Return the index. - entrySetIndex = _tree.GetEntrySetIndex(header.Index, node.GetIndex()); - return Result.Success; - } - - private Result FindEntrySetWithoutBuffer(out int outIndex, long virtualAddress, int nodeIndex) - { - UnsafeHelpers.SkipParamInit(out outIndex); - - // Calculate node extents. - long nodeSize = _tree._nodeSize; - long nodeOffset = nodeSize * (nodeIndex + 1); - ref ValueSubStorage storage = ref _tree._nodeStorage; - - // Read and validate the header. - Unsafe.SkipInit(out NodeHeader header); - Result rc = storage.Read(nodeOffset, SpanHelpers.AsByteSpan(ref header)); - if (rc.IsFailure()) return rc.Miss(); - - rc = header.Verify(nodeIndex, nodeSize, sizeof(long)); - if (rc.IsFailure()) return rc.Miss(); - - // Create the node, and find. - var node = new StorageNode(nodeOffset, sizeof(long), header.EntryCount); - rc = node.Find(in storage, virtualAddress); - if (rc.IsFailure()) return rc.Miss(); - - if (node.GetIndex() < 0) - return ResultFs.InvalidBucketTreeVirtualOffset.Log(); - - // Return the index. - outIndex = _tree.GetEntrySetIndex(header.Index, node.GetIndex()); - return Result.Success; - } - - private Result FindEntry(long virtualAddress, int entrySetIndex) - { - long entrySetSize = _tree._nodeSize; - - using var pool = new PooledBuffer((int)entrySetSize, 1); - - if (entrySetSize <= pool.GetSize()) - { - return FindEntryWithBuffer(virtualAddress, entrySetIndex, pool.GetBuffer()); - } - else - { - pool.Deallocate(); - return FindEntryWithoutBuffer(virtualAddress, entrySetIndex); - } - } - - private Result FindEntryWithBuffer(long virtualAddress, int entrySetIndex, Span buffer) - { - // Calculate entry set extents. - long entrySize = _tree._entrySize; - long entrySetSize = _tree._nodeSize; - long entrySetOffset = entrySetIndex * entrySetSize; - ref ValueSubStorage storage = ref _tree._entryStorage; - - // Read the entry set. - Result rc = storage.Read(entrySetOffset, buffer.Slice(0, (int)entrySetSize)); - if (rc.IsFailure()) return rc; - - // Validate the entry set. 
- EntrySetHeader entrySet = MemoryMarshal.Cast(buffer)[0]; - rc = entrySet.Header.Verify(entrySetIndex, entrySetSize, entrySize); - if (rc.IsFailure()) return rc; - - // Create the node, and find. - var node = new StorageNode(entrySize, entrySet.Info.Count); - node.Find(buffer, virtualAddress); - - if (node.GetIndex() < 0) - return ResultFs.InvalidBucketTreeVirtualOffset.Log(); - - // Copy the data into entry. - int entryIndex = node.GetIndex(); - long entryOffset = GetBucketTreeEntryOffset(0, entrySize, entryIndex); - buffer.Slice((int)entryOffset, (int)entrySize).CopyTo(_entry.Span); - - // Set our entry set/index. - _entrySet = entrySet; - _entryIndex = entryIndex; - - return Result.Success; - } - - private Result FindEntryWithoutBuffer(long virtualAddress, int entrySetIndex) - { - // Calculate entry set extents. - long entrySize = _tree._entrySize; - long entrySetSize = _tree._nodeSize; - long entrySetOffset = entrySetSize * entrySetIndex; - ref ValueSubStorage storage = ref _tree._entryStorage; - - // Read and validate the entry set. - Unsafe.SkipInit(out EntrySetHeader entrySet); - Result rc = storage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref entrySet)); - if (rc.IsFailure()) return rc.Miss(); - - rc = entrySet.Header.Verify(entrySetIndex, entrySetSize, entrySize); - if (rc.IsFailure()) return rc.Miss(); - - // Create the node, and find. - var node = new StorageNode(entrySetOffset, entrySize, entrySet.Info.Count); - rc = node.Find(in storage, virtualAddress); - if (rc.IsFailure()) return rc.Miss(); - - if (node.GetIndex() < 0) - return ResultFs.InvalidBucketTreeVirtualOffset.Log(); - - // Copy the data into entry. - _entryIndex = -1; - int entryIndex = node.GetIndex(); - long entryOffset = GetBucketTreeEntryOffset(entrySetOffset, entrySize, entryIndex); - - rc = storage.Read(entryOffset, _entry.Span); - if (rc.IsFailure()) return rc.Miss(); - - // Set our entry set/index. - _entrySet = entrySet; - _entryIndex = entryIndex; - - return Result.Success; - } - - public Result MoveNext() - { - Result rc; - - if (!IsValid()) - return ResultFs.OutOfRange.Log(); - - int entryIndex = _entryIndex + 1; - - // Invalidate our index, and read the header for the next index. - if (entryIndex == _entrySet.Info.Count) - { - int entrySetIndex = _entrySet.Info.Index + 1; - if (entrySetIndex >= _entrySetCount) - return ResultFs.OutOfRange.Log(); - - _entryIndex = -1; - - long end = _entrySet.Info.End; - - long entrySetSize = _tree._nodeSize; - long entrySetOffset = entrySetIndex * entrySetSize; - - rc = _tree._entryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); - if (rc.IsFailure()) return rc; - - rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, _tree._entrySize); - if (rc.IsFailure()) return rc; - - if (_entrySet.Info.Start != end || _entrySet.Info.Start >= _entrySet.Info.End) - return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); - - entryIndex = 0; - } - else - { - _entryIndex = 1; - } - - // Read the new entry - long entrySize = _tree._entrySize; - long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, _tree._nodeSize, entrySize, entryIndex); - - rc = _tree._entryStorage.Read(entryOffset, _entry.Span); - if (rc.IsFailure()) return rc; - - // Note that we changed index. 
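Once Find has positioned a Visitor, client code typically walks forward with CanMoveNext/MoveNext, reading each entry with Get. The following is an illustrative client-side loop only, assuming the tree was built with IndirectStorage-sized entries; DumpEntries is a hypothetical helper, not library API.

    using System;
    using LibHac;
    using LibHac.FsSystem;

    internal static class BucketTreeDumpSketch
    {
        public static Result DumpEntries(BucketTree tree)
        {
            Result rc = tree.GetOffsets(out BucketTree.Offsets offsets);
            if (rc.IsFailure()) return rc;

            using var visitor = new BucketTree.Visitor();

            // Position the visitor on the first entry.
            rc = tree.Find(ref visitor.Ref, offsets.StartOffset);
            if (rc.IsFailure()) return rc;

            while (true)
            {
                ref readonly IndirectStorage.Entry entry = ref visitor.Get<IndirectStorage.Entry>();
                Console.WriteLine($"virtual 0x{entry.GetVirtualOffset():X} -> storage {entry.StorageIndex} @ 0x{entry.GetPhysicalOffset():X}");

                if (!visitor.CanMoveNext())
                    break;

                rc = visitor.MoveNext();
                if (rc.IsFailure()) return rc;
            }

            return Result.Success;
        }
    }
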
- _entryIndex = entryIndex; - return Result.Success; - } - - public Result MovePrevious() - { - Result rc; - - if (!IsValid()) - return ResultFs.OutOfRange.Log(); - - int entryIndex = _entryIndex; - - if (entryIndex == 0) - { - if (_entrySet.Info.Index <= 0) - return ResultFs.OutOfRange.Log(); - - _entryIndex = -1; - - long start = _entrySet.Info.Start; - - long entrySetSize = _tree._nodeSize; - int entrySetIndex = _entrySet.Info.Index - 1; - long entrySetOffset = entrySetIndex * entrySetSize; - - rc = _tree._entryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet)); - if (rc.IsFailure()) return rc; - - rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, _tree._entrySize); - if (rc.IsFailure()) return rc; - - if (_entrySet.Info.End != start || _entrySet.Info.Start >= _entrySet.Info.End) - return ResultFs.InvalidBucketTreeEntrySetOffset.Log(); - - entryIndex = _entrySet.Info.Count; - } - else - { - _entryIndex = -1; - } - - entryIndex--; - - // Read the new entry - long entrySize = _tree._entrySize; - long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, _tree._nodeSize, entrySize, entryIndex); - - rc = _tree._entryStorage.Read(entryOffset, _entry.Span); - if (rc.IsFailure()) return rc; - - // Note that we changed index. - _entryIndex = entryIndex; - return Result.Success; - } - - public readonly Result ScanContinuousReading(out ContinuousReadingInfo info, long offset, long size) - where TEntry : unmanaged, IContinuousReadingEntry - { - var param = new ContinuousReadingParam - { - Offset = offset, - Size = size, - EntrySet = _entrySet.Header, - EntryIndex = _entryIndex, - TreeOffsets = _treeOffsets - }; - - _entry.Span.CopyTo(SpanHelpers.AsByteSpan(ref param.Entry)); - - return _tree.ScanContinuousReading(out info, in param); - } - } -} diff --git a/src/LibHac/FsSystem/BucketTreeBuilder.cs b/src/LibHac/FsSystem/BucketTreeBuilder.cs index d66438b3..84c49e1c 100644 --- a/src/LibHac/FsSystem/BucketTreeBuilder.cs +++ b/src/LibHac/FsSystem/BucketTreeBuilder.cs @@ -12,52 +12,57 @@ public partial class BucketTree { public class Builder { - private SubStorage NodeStorage { get; set; } - private SubStorage EntryStorage { get; set; } - private NodeBuffer _l1Node; private NodeBuffer _l2Node; private NodeBuffer _entrySet; - private int NodeSize { get; set; } - private int EntrySize { get; set; } - private int EntryCount { get; set; } - private int EntriesPerEntrySet { get; set; } - private int OffsetsPerNode { get; set; } + private ValueSubStorage _nodeStorage; + private ValueSubStorage _entryStorage; - private int CurrentL2OffsetIndex { get; set; } - private int CurrentEntryIndex { get; set; } - private long CurrentOffset { get; set; } = -1; + private int _nodeSize; + private int _entrySize; + private int _entryCount; + private int _entriesPerEntrySet; + private int _offsetsPerNode; + + private int _currentL2OffsetIndex; + private int _currentEntryIndex; + private long _currentOffset; + + public Builder() + { + _currentOffset = -1; + } /// /// Initializes the bucket tree builder. /// - /// The the tree's header will be written to.Must be at least the size in bytes returned by . - /// The the tree's nodes will be written to. Must be at least the size in bytes returned by . - /// The the tree's entries will be written to. Must be at least the size in bytes returned by . + /// The to use for buffer allocation. + /// The the tree's header will be written to.Must be at least the size in bytes returned by . + /// The the tree's nodes will be written to. 
Must be at least the size in bytes returned by . + /// The the tree's entries will be written to. Must be at least the size in bytes returned by . /// The size of each node in the bucket tree. Must be a power of 2. /// The size of each entry that will be stored in the bucket tree. /// The exact number of entries that will be added to the bucket tree. /// The of the operation. - public Result Initialize(SubStorage headerStorage, SubStorage nodeStorage, SubStorage entryStorage, - int nodeSize, int entrySize, int entryCount) + public Result Initialize(MemoryResource allocator, in ValueSubStorage headerStorage, + in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int nodeSize, int entrySize, + int entryCount) { + Assert.NotNull(allocator); Assert.SdkRequiresLessEqual(sizeof(long), entrySize); Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - if (headerStorage is null || nodeStorage is null || entryStorage is null) - return ResultFs.NullptrArgument.Log(); - // Set the builder parameters - NodeSize = nodeSize; - EntrySize = entrySize; - EntryCount = entryCount; + _nodeSize = nodeSize; + _entrySize = entrySize; + _entryCount = entryCount; - EntriesPerEntrySet = GetEntryCount(nodeSize, entrySize); - OffsetsPerNode = GetOffsetCount(nodeSize); - CurrentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount); + _entriesPerEntrySet = GetEntryCount(nodeSize, entrySize); + _offsetsPerNode = GetOffsetCount(nodeSize); + _currentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount); // Create and write the header var header = new Header(); @@ -66,27 +71,27 @@ public partial class BucketTree if (rc.IsFailure()) return rc; // Allocate buffers for the L1 node and entry sets - _l1Node.Allocate(nodeSize); - _entrySet.Allocate(nodeSize); + _l1Node.Allocate(allocator, nodeSize); + _entrySet.Allocate(allocator, nodeSize); int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount); // Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node - if (OffsetsPerNode < entrySetCount) + if (_offsetsPerNode < entrySetCount) { - _l2Node.Allocate(nodeSize); + _l2Node.Allocate(allocator, nodeSize); } _l1Node.FillZero(); _l2Node.FillZero(); _entrySet.FillZero(); - NodeStorage = nodeStorage; - EntryStorage = entryStorage; + _nodeStorage.Set(in nodeStorage); + _entryStorage.Set(in entryStorage); // Set the initial position - CurrentEntryIndex = 0; - CurrentOffset = -1; + _currentEntryIndex = 0; + _currentOffset = -1; return Result.Success; } @@ -97,17 +102,17 @@ public partial class BucketTree /// The type of the entry to add. Added entries should all be the same type. /// The entry to add. /// The of the operation. 
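Add extracts the entry's key by reading the first eight bytes of the struct, so any entry type fed to the builder must place its virtual offset first and entries must arrive in strictly ascending offset order. A small sketch of a conforming (hypothetical) entry type and the same byte-level read:

    using System;
    using System.Buffers.Binary;
    using System.Runtime.InteropServices;

    internal static class EntryLayoutSketch
    {
        // Hypothetical entry type satisfying the builder's contract: the virtual offset
        // occupies the first 8 bytes of the struct.
        [StructLayout(LayoutKind.Sequential, Pack = 4)]
        private struct MyEntry
        {
            public long VirtualOffset;   // must come first
            public long PhysicalOffset;
            public int StorageIndex;
        }

        public static void Main()
        {
            Span<MyEntry> one = stackalloc MyEntry[1];
            one[0] = new MyEntry { VirtualOffset = 0x123456, PhysicalOffset = 0x4000, StorageIndex = 1 };

            // Mirrors how the builder reads the key it validates ordering against.
            long key = BinaryPrimitives.ReadInt64LittleEndian(MemoryMarshal.AsBytes(one));
            Console.WriteLine($"0x{key:X}"); // 0x123456
        }
    }
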
- public Result Add(ref T entry) where T : unmanaged + public Result Add(in T entry) where T : unmanaged { - Assert.SdkRequiresEqual(Unsafe.SizeOf(), EntrySize); + Assert.SdkRequiresEqual(Unsafe.SizeOf(), _entrySize); - if (CurrentEntryIndex >= EntryCount) + if (_currentEntryIndex >= _entryCount) return ResultFs.OutOfRange.Log(); // The entry offset must always be the first 8 bytes of the struct - long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsByteSpan(ref entry)); + long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsReadOnlyByteSpan(in entry)); - if (entryOffset <= CurrentOffset) + if (entryOffset <= _currentOffset) return ResultFs.InvalidOffset.Log(); Result rc = FinalizePreviousEntrySet(entryOffset); @@ -116,11 +121,11 @@ public partial class BucketTree AddEntryOffset(entryOffset); // Write the new entry - int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet; + int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; _entrySet.GetNode().GetWritableArray()[indexInEntrySet] = entry; - CurrentOffset = entryOffset; - CurrentEntryIndex++; + _currentOffset = entryOffset; + _currentEntryIndex++; return Result.Success; } @@ -133,32 +138,32 @@ public partial class BucketTree /// The of the operation. private Result FinalizePreviousEntrySet(long endOffset) { - int prevEntrySetIndex = CurrentEntryIndex / EntriesPerEntrySet - 1; - int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet; + int prevEntrySetIndex = _currentEntryIndex / _entriesPerEntrySet - 1; + int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; // If the previous Add finished an entry set - if (CurrentEntryIndex > 0 && indexInEntrySet == 0) + if (_currentEntryIndex > 0 && indexInEntrySet == 0) { // Set the end offset of that entry set ref NodeHeader entrySetHeader = ref _entrySet.GetHeader(); entrySetHeader.Index = prevEntrySetIndex; - entrySetHeader.Count = EntriesPerEntrySet; - entrySetHeader.Offset = endOffset; + entrySetHeader.EntryCount = _entriesPerEntrySet; + entrySetHeader.OffsetEnd = endOffset; // Write the entry set to the entry storage - long storageOffset = (long)NodeSize * prevEntrySetIndex; - Result rc = EntryStorage.Write(storageOffset, _entrySet.GetBuffer()); + long storageOffset = (long)_nodeSize * prevEntrySetIndex; + Result rc = _entryStorage.Write(storageOffset, _entrySet.GetBuffer()); if (rc.IsFailure()) return rc; // Clear the entry set buffer to begin the new entry set _entrySet.FillZero(); // Check if we're writing in L2 nodes - if (CurrentL2OffsetIndex > OffsetsPerNode) + if (_currentL2OffsetIndex > _offsetsPerNode) { - int prevL2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode - 2; - int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode; + int prevL2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode - 2; + int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; // If the previous Add finished an L2 node if (indexInL2Node == 0) @@ -167,12 +172,12 @@ public partial class BucketTree ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader(); l2NodeHeader.Index = prevL2NodeIndex; - l2NodeHeader.Count = OffsetsPerNode; - l2NodeHeader.Offset = endOffset; + l2NodeHeader.EntryCount = _offsetsPerNode; + l2NodeHeader.OffsetEnd = endOffset; // Write the L2 node to the node storage - long nodeOffset = (long)NodeSize * (prevL2NodeIndex + 1); - rc = NodeStorage.Write(nodeOffset, _l2Node.GetBuffer()); + long nodeOffset = (long)_nodeSize * (prevL2NodeIndex + 1); + rc = _nodeStorage.Write(nodeOffset, _l2Node.GetBuffer()); if (rc.IsFailure()) 
return rc; // Clear the L2 node buffer to begin the new node @@ -190,31 +195,31 @@ public partial class BucketTree /// The start offset of the entry being added. private void AddEntryOffset(long entryOffset) { - int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet; - int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet; + int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet; + int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; // If we're starting a new entry set we need to add its start offset to the L1/L2 nodes if (indexInEntrySet == 0) { Span l1Data = _l1Node.GetNode().GetWritableArray(); - if (CurrentL2OffsetIndex == 0) + if (_currentL2OffsetIndex == 0) { // There are no L2 nodes. Write the entry set end offset directly to L1 l1Data[entrySetIndex] = entryOffset; } else { - if (CurrentL2OffsetIndex < OffsetsPerNode) + if (_currentL2OffsetIndex < _offsetsPerNode) { // The current L2 offset is stored in the L1 node - l1Data[CurrentL2OffsetIndex] = entryOffset; + l1Data[_currentL2OffsetIndex] = entryOffset; } else { // Write the entry set offset to the current L2 node - int l2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode; - int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode; + int l2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode; + int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; Span l2Data = _l2Node.GetNode().GetWritableArray(); l2Data[indexInL2Node] = entryOffset; @@ -226,7 +231,7 @@ public partial class BucketTree } } - CurrentL2OffsetIndex++; + _currentL2OffsetIndex++; } } } @@ -239,20 +244,20 @@ public partial class BucketTree public Result Finalize(long endOffset) { // Finalize must only be called after all entries are added - if (EntryCount != CurrentEntryIndex) + if (_entryCount != _currentEntryIndex) return ResultFs.OutOfRange.Log(); - if (endOffset <= CurrentOffset) + if (endOffset <= _currentOffset) return ResultFs.InvalidOffset.Log(); - if (CurrentOffset == -1) + if (_currentOffset == -1) return Result.Success; Result rc = FinalizePreviousEntrySet(endOffset); if (rc.IsFailure()) return rc; - int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet; - int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet; + int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet; + int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; // Finalize the current entry set if needed if (indexInEntrySet != 0) @@ -260,49 +265,49 @@ public partial class BucketTree ref NodeHeader entrySetHeader = ref _entrySet.GetHeader(); entrySetHeader.Index = entrySetIndex; - entrySetHeader.Count = indexInEntrySet; - entrySetHeader.Offset = endOffset; + entrySetHeader.EntryCount = indexInEntrySet; + entrySetHeader.OffsetEnd = endOffset; - long entryStorageOffset = (long)NodeSize * entrySetIndex; - rc = EntryStorage.Write(entryStorageOffset, _entrySet.GetBuffer()); + long entryStorageOffset = (long)_nodeSize * entrySetIndex; + rc = _entryStorage.Write(entryStorageOffset, _entrySet.GetBuffer()); if (rc.IsFailure()) return rc; } - int l2NodeIndex = BitUtil.DivideUp(CurrentL2OffsetIndex, OffsetsPerNode) - 2; - int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode; + int l2NodeIndex = BitUtil.DivideUp(_currentL2OffsetIndex, _offsetsPerNode) - 2; + int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; // Finalize the current L2 node if needed - if (CurrentL2OffsetIndex > OffsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0)) + if (_currentL2OffsetIndex > _offsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0)) 
{ ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader(); l2NodeHeader.Index = l2NodeIndex; - l2NodeHeader.Count = indexInL2Node != 0 ? indexInL2Node : OffsetsPerNode; - l2NodeHeader.Offset = endOffset; + l2NodeHeader.EntryCount = indexInL2Node != 0 ? indexInL2Node : _offsetsPerNode; + l2NodeHeader.OffsetEnd = endOffset; - long l2NodeStorageOffset = NodeSize * (l2NodeIndex + 1); - rc = NodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer()); + long l2NodeStorageOffset = _nodeSize * (l2NodeIndex + 1); + rc = _nodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer()); if (rc.IsFailure()) return rc; } // Finalize the L1 node ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader(); l1NodeHeader.Index = 0; - l1NodeHeader.Offset = endOffset; + l1NodeHeader.OffsetEnd = endOffset; // L1 count depends on the existence or absence of L2 nodes - if (CurrentL2OffsetIndex == 0) + if (_currentL2OffsetIndex == 0) { - l1NodeHeader.Count = BitUtil.DivideUp(CurrentEntryIndex, EntriesPerEntrySet); + l1NodeHeader.EntryCount = BitUtil.DivideUp(_currentEntryIndex, _entriesPerEntrySet); } else { - l1NodeHeader.Count = l2NodeIndex + 1; + l1NodeHeader.EntryCount = l2NodeIndex + 1; } - rc = NodeStorage.Write(0, _l1Node.GetBuffer()); + rc = _nodeStorage.Write(0, _l1Node.GetBuffer()); if (rc.IsFailure()) return rc; - CurrentOffset = long.MaxValue; + _currentOffset = long.MaxValue; return Result.Success; } } diff --git a/src/LibHac/FsSystem/BucketTreeBuilder2.cs b/src/LibHac/FsSystem/BucketTreeBuilder2.cs deleted file mode 100644 index 412b8003..00000000 --- a/src/LibHac/FsSystem/BucketTreeBuilder2.cs +++ /dev/null @@ -1,314 +0,0 @@ -using System; -using System.Buffers.Binary; -using System.Runtime.CompilerServices; -using LibHac.Common; -using LibHac.Diag; -using LibHac.Fs; -using LibHac.Util; - -namespace LibHac.FsSystem; - -public partial class BucketTree2 -{ - public class Builder - { - private NodeBuffer _l1Node; - private NodeBuffer _l2Node; - private NodeBuffer _entrySet; - - private ValueSubStorage _nodeStorage; - private ValueSubStorage _entryStorage; - - private int _nodeSize; - private int _entrySize; - private int _entryCount; - private int _entriesPerEntrySet; - private int _offsetsPerNode; - - private int _currentL2OffsetIndex; - private int _currentEntryIndex; - private long _currentOffset; - - public Builder() - { - _currentOffset = -1; - } - - /// - /// Initializes the bucket tree builder. - /// - /// The to use for buffer allocation. - /// The the tree's header will be written to.Must be at least the size in bytes returned by . - /// The the tree's nodes will be written to. Must be at least the size in bytes returned by . - /// The the tree's entries will be written to. Must be at least the size in bytes returned by . - /// The size of each node in the bucket tree. Must be a power of 2. - /// The size of each entry that will be stored in the bucket tree. - /// The exact number of entries that will be added to the bucket tree. - /// The of the operation. 
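Putting the builder API together, the expected call sequence is Initialize, then one Add per entry in ascending virtual-offset order, then Finalize with the virtual end offset. The sketch below is a hypothetical helper, not library code; it assumes a single writable table storage laid out as [header][nodes][entries] (the layout IndirectStorage.Initialize expects), entries that are already sorted, and a caller-supplied MemoryResource.

    using System;
    using System.Runtime.CompilerServices;
    using LibHac;
    using LibHac.Fs;
    using LibHac.FsSystem;

    internal static class BucketTreeBuildSketch
    {
        public static Result Build<TEntry>(MemoryResource allocator, IStorage tableStorage, int nodeSize,
            ReadOnlySpan<TEntry> entries, long endOffset) where TEntry : unmanaged
        {
            int entrySize = Unsafe.SizeOf<TEntry>();

            long headerSize = BucketTree.QueryHeaderStorageSize();
            long nodeStorageSize = BucketTree.QueryNodeStorageSize(nodeSize, entrySize, entries.Length);
            long entryStorageSize = BucketTree.QueryEntryStorageSize(nodeSize, entrySize, entries.Length);

            using var headerStorage = new ValueSubStorage(tableStorage, 0, headerSize);
            using var nodeStorage = new ValueSubStorage(tableStorage, headerSize, nodeStorageSize);
            using var entryStorage = new ValueSubStorage(tableStorage, headerSize + nodeStorageSize, entryStorageSize);

            var builder = new BucketTree.Builder();

            Result rc = builder.Initialize(allocator, in headerStorage, in nodeStorage, in entryStorage,
                nodeSize, entrySize, entries.Length);
            if (rc.IsFailure()) return rc;

            // Entries must be added in strictly ascending virtual-offset order.
            for (int i = 0; i < entries.Length; i++)
            {
                rc = builder.Add(in entries[i]);
                if (rc.IsFailure()) return rc;
            }

            // endOffset is the virtual end of the final entry's range.
            return builder.Finalize(endOffset);
        }
    }
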
- public Result Initialize(MemoryResource allocator, in ValueSubStorage headerStorage, - in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int nodeSize, int entrySize, - int entryCount) - { - Assert.NotNull(allocator); - Assert.SdkRequiresLessEqual(sizeof(long), entrySize); - Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf(), nodeSize); - Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax); - Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize)); - - // Set the builder parameters - _nodeSize = nodeSize; - _entrySize = entrySize; - _entryCount = entryCount; - - _entriesPerEntrySet = GetEntryCount(nodeSize, entrySize); - _offsetsPerNode = GetOffsetCount(nodeSize); - _currentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount); - - // Create and write the header - var header = new Header(); - header.Format(entryCount); - Result rc = headerStorage.Write(0, SpanHelpers.AsByteSpan(ref header)); - if (rc.IsFailure()) return rc; - - // Allocate buffers for the L1 node and entry sets - _l1Node.Allocate(allocator, nodeSize); - _entrySet.Allocate(allocator, nodeSize); - - int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount); - - // Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node - if (_offsetsPerNode < entrySetCount) - { - _l2Node.Allocate(allocator, nodeSize); - } - - _l1Node.FillZero(); - _l2Node.FillZero(); - _entrySet.FillZero(); - - _nodeStorage.Set(in nodeStorage); - _entryStorage.Set(in entryStorage); - - // Set the initial position - _currentEntryIndex = 0; - _currentOffset = -1; - - return Result.Success; - } - - /// - /// Adds a new entry to the bucket tree. - /// - /// The type of the entry to add. Added entries should all be the same type. - /// The entry to add. - /// The of the operation. - public Result Add(in T entry) where T : unmanaged - { - Assert.SdkRequiresEqual(Unsafe.SizeOf(), _entrySize); - - if (_currentEntryIndex >= _entryCount) - return ResultFs.OutOfRange.Log(); - - // The entry offset must always be the first 8 bytes of the struct - long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsReadOnlyByteSpan(in entry)); - - if (entryOffset <= _currentOffset) - return ResultFs.InvalidOffset.Log(); - - Result rc = FinalizePreviousEntrySet(entryOffset); - if (rc.IsFailure()) return rc; - - AddEntryOffset(entryOffset); - - // Write the new entry - int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; - _entrySet.GetNode().GetWritableArray()[indexInEntrySet] = entry; - - _currentOffset = entryOffset; - _currentEntryIndex++; - - return Result.Success; - } - - /// - /// Checks if a new entry set is being started. If so, sets the end offset of the previous - /// entry set and writes it to the output storage. - /// - /// The end offset of the previous entry. - /// The of the operation. 
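The "previous entry set just finished" check in FinalizePreviousEntrySet is plain modular arithmetic on the running entry index. A tiny sketch with illustrative figures (16 KiB node, 20-byte entries, 16-byte node header, so 818 entries per entry set):

    using System;

    internal static class EntrySetBoundarySketch
    {
        private const int EntriesPerEntrySet = 818; // (16384 - 16) / 20

        // The previous entry set is flushed exactly when the running entry index is a
        // non-zero multiple of the entries-per-set figure.
        public static bool JustClosedEntrySet(int currentEntryIndex) =>
            currentEntryIndex > 0 && currentEntryIndex % EntriesPerEntrySet == 0;

        public static void Main()
        {
            Console.WriteLine(JustClosedEntrySet(817));  // False: entry set 0 still filling
            Console.WriteLine(JustClosedEntrySet(818));  // True: entry set 0 complete, gets its end offset
            Console.WriteLine(JustClosedEntrySet(1636)); // True: entry set 1 complete
        }
    }

The same pattern, one level up, decides when an L2 node is complete and must be written to node storage.
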
- private Result FinalizePreviousEntrySet(long endOffset) - { - int prevEntrySetIndex = _currentEntryIndex / _entriesPerEntrySet - 1; - int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; - - // If the previous Add finished an entry set - if (_currentEntryIndex > 0 && indexInEntrySet == 0) - { - // Set the end offset of that entry set - ref NodeHeader entrySetHeader = ref _entrySet.GetHeader(); - - entrySetHeader.Index = prevEntrySetIndex; - entrySetHeader.EntryCount = _entriesPerEntrySet; - entrySetHeader.OffsetEnd = endOffset; - - // Write the entry set to the entry storage - long storageOffset = (long)_nodeSize * prevEntrySetIndex; - Result rc = _entryStorage.Write(storageOffset, _entrySet.GetBuffer()); - if (rc.IsFailure()) return rc; - - // Clear the entry set buffer to begin the new entry set - _entrySet.FillZero(); - - // Check if we're writing in L2 nodes - if (_currentL2OffsetIndex > _offsetsPerNode) - { - int prevL2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode - 2; - int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; - - // If the previous Add finished an L2 node - if (indexInL2Node == 0) - { - // Set the end offset of that node - ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader(); - - l2NodeHeader.Index = prevL2NodeIndex; - l2NodeHeader.EntryCount = _offsetsPerNode; - l2NodeHeader.OffsetEnd = endOffset; - - // Write the L2 node to the node storage - long nodeOffset = (long)_nodeSize * (prevL2NodeIndex + 1); - rc = _nodeStorage.Write(nodeOffset, _l2Node.GetBuffer()); - if (rc.IsFailure()) return rc; - - // Clear the L2 node buffer to begin the new node - _l2Node.FillZero(); - } - } - } - - return Result.Success; - } - - /// - /// If needed, adds a new entry set's start offset to the L1 or L2 nodes. - /// - /// The start offset of the entry being added. - private void AddEntryOffset(long entryOffset) - { - int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet; - int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; - - // If we're starting a new entry set we need to add its start offset to the L1/L2 nodes - if (indexInEntrySet == 0) - { - Span l1Data = _l1Node.GetNode().GetWritableArray(); - - if (_currentL2OffsetIndex == 0) - { - // There are no L2 nodes. Write the entry set end offset directly to L1 - l1Data[entrySetIndex] = entryOffset; - } - else - { - if (_currentL2OffsetIndex < _offsetsPerNode) - { - // The current L2 offset is stored in the L1 node - l1Data[_currentL2OffsetIndex] = entryOffset; - } - else - { - // Write the entry set offset to the current L2 node - int l2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode; - int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; - - Span l2Data = _l2Node.GetNode().GetWritableArray(); - l2Data[indexInL2Node] = entryOffset; - - // If we're starting a new L2 node we need to add its start offset to the L1 node - if (indexInL2Node == 0) - { - l1Data[l2NodeIndex - 1] = entryOffset; - } - } - - _currentL2OffsetIndex++; - } - } - } - - /// - /// Finalizes the bucket tree. Must be called after all entries are added. - /// - /// The end offset of the bucket tree. - /// The of the operation. 
- public Result Finalize(long endOffset) - { - // Finalize must only be called after all entries are added - if (_entryCount != _currentEntryIndex) - return ResultFs.OutOfRange.Log(); - - if (endOffset <= _currentOffset) - return ResultFs.InvalidOffset.Log(); - - if (_currentOffset == -1) - return Result.Success; - - Result rc = FinalizePreviousEntrySet(endOffset); - if (rc.IsFailure()) return rc; - - int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet; - int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet; - - // Finalize the current entry set if needed - if (indexInEntrySet != 0) - { - ref NodeHeader entrySetHeader = ref _entrySet.GetHeader(); - - entrySetHeader.Index = entrySetIndex; - entrySetHeader.EntryCount = indexInEntrySet; - entrySetHeader.OffsetEnd = endOffset; - - long entryStorageOffset = (long)_nodeSize * entrySetIndex; - rc = _entryStorage.Write(entryStorageOffset, _entrySet.GetBuffer()); - if (rc.IsFailure()) return rc; - } - - int l2NodeIndex = BitUtil.DivideUp(_currentL2OffsetIndex, _offsetsPerNode) - 2; - int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode; - - // Finalize the current L2 node if needed - if (_currentL2OffsetIndex > _offsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0)) - { - ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader(); - l2NodeHeader.Index = l2NodeIndex; - l2NodeHeader.EntryCount = indexInL2Node != 0 ? indexInL2Node : _offsetsPerNode; - l2NodeHeader.OffsetEnd = endOffset; - - long l2NodeStorageOffset = _nodeSize * (l2NodeIndex + 1); - rc = _nodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer()); - if (rc.IsFailure()) return rc; - } - - // Finalize the L1 node - ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader(); - l1NodeHeader.Index = 0; - l1NodeHeader.OffsetEnd = endOffset; - - // L1 count depends on the existence or absence of L2 nodes - if (_currentL2OffsetIndex == 0) - { - l1NodeHeader.EntryCount = BitUtil.DivideUp(_currentEntryIndex, _entriesPerEntrySet); - } - else - { - l1NodeHeader.EntryCount = l2NodeIndex + 1; - } - - rc = _nodeStorage.Write(0, _l1Node.GetBuffer()); - if (rc.IsFailure()) return rc; - - _currentOffset = long.MaxValue; - return Result.Success; - } - } -} diff --git a/src/LibHac/FsSystem/IndirectStorage.cs b/src/LibHac/FsSystem/IndirectStorage.cs index fd8b9aa4..67921fca 100644 --- a/src/LibHac/FsSystem/IndirectStorage.cs +++ b/src/LibHac/FsSystem/IndirectStorage.cs @@ -2,20 +2,27 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using LibHac.Common; +using LibHac.Common.FixedArrays; using LibHac.Diag; using LibHac.Fs; namespace LibHac.FsSystem; +/// +/// Combines multiple s into a single . +/// +/// The 's contains +/// values that describe how the created storage is to be built from the base storages. 
+/// Based on FS 13.1.0 (nnSdk 13.4.0) public class IndirectStorage : IStorage { public static readonly int StorageCount = 2; public static readonly int NodeSize = 1024 * 16; - private BucketTree Table { get; } = new BucketTree(); - private SubStorage[] DataStorage { get; } = new SubStorage[StorageCount]; + private BucketTree _table; + private Array2 _dataStorage; - [StructLayout(LayoutKind.Sequential, Size = 0x14, Pack = 4)] + [StructLayout(LayoutKind.Sequential, Pack = 4)] public struct Entry { private long VirtualOffset; @@ -29,6 +36,52 @@ public class IndirectStorage : IStorage public readonly long GetPhysicalOffset() => PhysicalOffset; } + public struct EntryData + { + public long VirtualOffset; + public long PhysicalOffset; + public int StorageIndex; + + public void Set(in Entry entry) + { + VirtualOffset = entry.GetVirtualOffset(); + PhysicalOffset = entry.GetPhysicalOffset(); + StorageIndex = entry.StorageIndex; + } + } + + private struct ContinuousReadingEntry : BucketTree.IContinuousReadingEntry + { + public int FragmentSizeMax => 1024 * 4; + +#pragma warning disable CS0649 + // This field will be read in by BucketTree.Visitor.ScanContinuousReading + private Entry _entry; +#pragma warning restore CS0649 + + public readonly long GetVirtualOffset() => _entry.GetVirtualOffset(); + public readonly long GetPhysicalOffset() => _entry.GetPhysicalOffset(); + public readonly bool IsFragment() => _entry.StorageIndex != 0; + } + + public IndirectStorage() + { + _table = new BucketTree(); + } + + public override void Dispose() + { + FinalizeObject(); + + Span items = _dataStorage.Items; + for (int i = 0; i < items.Length; i++) + items[i].Dispose(); + + _table.Dispose(); + + base.Dispose(); + } + public static long QueryHeaderStorageSize() => BucketTree.QueryHeaderStorageSize(); public static long QueryNodeStorageSize(int entryCount) => @@ -37,120 +90,86 @@ public class IndirectStorage : IStorage public static long QueryEntryStorageSize(int entryCount) => BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf(), entryCount); - public bool IsInitialized() => Table.IsInitialized(); - - public Result Initialize(SubStorage tableStorage) - { - // Read and verify the bucket tree header. - // note: skip init - var header = new BucketTree.Header(); - - Result rc = tableStorage.Read(0, SpanHelpers.AsByteSpan(ref header)); - if (rc.IsFailure()) return rc; - - rc = header.Verify(); - if (rc.IsFailure()) return rc; - - // Determine extents. - long nodeStorageSize = QueryNodeStorageSize(header.EntryCount); - long entryStorageSize = QueryEntryStorageSize(header.EntryCount); - long nodeStorageOffset = QueryHeaderStorageSize(); - long entryStorageOffset = nodeStorageOffset + nodeStorageSize; - - // Initialize. 
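Conceptually, each Entry maps the virtual range from its VirtualOffset up to the next entry's VirtualOffset onto one of the base storages, starting at PhysicalOffset. The sketch below models that splitting with a flat, in-memory list instead of the bucket tree; MapEntry, Piece, and Resolve are hypothetical names, and it assumes the first entry starts at virtual offset 0.

    using System;
    using System.Collections.Generic;

    internal static class IndirectMappingSketch
    {
        // Range [VirtualOffset, next VirtualOffset) is served by storage 'StorageIndex'
        // starting at 'PhysicalOffset'.
        private readonly record struct MapEntry(long VirtualOffset, long PhysicalOffset, int StorageIndex);

        private readonly record struct Piece(int StorageIndex, long PhysicalOffset, long Size);

        // Splits the request [offset, offset + size) into per-storage pieces, assuming
        // 'entries' is sorted by VirtualOffset and 'end' is the total virtual size.
        public static IEnumerable<Piece> Resolve(IReadOnlyList<MapEntry> entries, long end, long offset, long size)
        {
            long current = offset;
            long requestEnd = offset + size;

            for (int i = 0; i < entries.Count && current < requestEnd; i++)
            {
                long entryEnd = i + 1 < entries.Count ? entries[i + 1].VirtualOffset : end;
                if (entryEnd <= current)
                    continue;

                MapEntry entry = entries[i];
                long pieceSize = Math.Min(entryEnd, requestEnd) - current;
                long physical = entry.PhysicalOffset + (current - entry.VirtualOffset);

                yield return new Piece(entry.StorageIndex, physical, pieceSize);
                current += pieceSize;
            }
        }
    }

IndirectStorage.DoRead performs the same splitting, but drives it through the bucket tree visitor per entry instead of a flat list.
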
- var nodeStorage = new SubStorage(tableStorage, nodeStorageOffset, nodeStorageSize); - var entryStorage = new SubStorage(tableStorage, entryStorageOffset, entryStorageSize); - - return Initialize(nodeStorage, entryStorage, header.EntryCount); - } - - public Result Initialize(SubStorage nodeStorage, SubStorage entryStorage, int entryCount) - { - return Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf(), entryCount); - } - - public void SetStorage(int index, SubStorage storage) + public void SetStorage(int index, in ValueSubStorage storage) { Assert.SdkRequiresInRange(index, 0, StorageCount); - DataStorage[index] = storage; + _dataStorage[index].Set(in storage); } public void SetStorage(int index, IStorage storage, long offset, long size) { Assert.SdkRequiresInRange(index, 0, StorageCount); - DataStorage[index] = new SubStorage(storage, offset, size); + + using var subStorage = new ValueSubStorage(storage, offset, size); + _dataStorage[index].Set(in subStorage); } - public Result GetEntryList(Span entryBuffer, out int outputEntryCount, long offset, long size) + protected ref ValueSubStorage GetDataStorage(int index) { - // Validate pre-conditions - Assert.SdkRequiresLessEqual(0, offset); - Assert.SdkRequiresLessEqual(0, size); - Assert.SdkRequires(IsInitialized()); - - // Clear the out count - outputEntryCount = 0; - - // Succeed if there's no range - if (size == 0) - return Result.Success; - - // Check that our range is valid - if (!Table.Includes(offset, size)) - return ResultFs.OutOfRange.Log(); - - // Find the offset in our tree - var visitor = new BucketTree.Visitor(); - - try - { - Result rc = Table.Find(ref visitor, offset); - if (rc.IsFailure()) return rc; - - long entryOffset = visitor.Get().GetVirtualOffset(); - if (entryOffset > 0 || !Table.Includes(entryOffset)) - return ResultFs.InvalidIndirectEntryOffset.Log(); - - // Prepare to loop over entries - long endOffset = offset + size; - int count = 0; - - ref Entry currentEntry = ref visitor.Get(); - while (currentEntry.GetVirtualOffset() < endOffset) - { - // Try to write the entry to the out list - if (entryBuffer.Length != 0) - { - if (count >= entryBuffer.Length) - break; - - entryBuffer[count] = currentEntry; - } - - count++; - - // Advance - if (visitor.CanMoveNext()) - { - rc = visitor.MoveNext(); - if (rc.IsFailure()) return rc; - - currentEntry = ref visitor.Get(); - } - else - { - break; - } - } - - // Write the entry count - outputEntryCount = count; - return Result.Success; - } - finally { visitor.Dispose(); } + Assert.SdkRequiresInRange(index, 0, StorageCount); + return ref _dataStorage[index]; } - protected override unsafe Result DoRead(long offset, Span destination) + protected BucketTree GetEntryTable() + { + return _table; + } + + public bool IsInitialized() + { + return _table.IsInitialized(); + } + + public Result Initialize(MemoryResource allocator, in ValueSubStorage tableStorage) + { + Unsafe.SkipInit(out BucketTree.Header header); + + Result rc = tableStorage.Read(0, SpanHelpers.AsByteSpan(ref header)); + if (rc.IsFailure()) return rc.Miss(); + + rc = header.Verify(); + if (rc.IsFailure()) return rc.Miss(); + + long nodeStorageSize = QueryNodeStorageSize(header.EntryCount); + long entryStorageSize = QueryEntryStorageSize(header.EntryCount); + long nodeStorageOffset = QueryHeaderStorageSize(); + long entryStorageOffset = nodeStorageSize + nodeStorageOffset; + + rc = tableStorage.GetSize(out long storageSize); + if (rc.IsFailure()) return rc.Miss(); + + if (storageSize < entryStorageOffset + 
entryStorageSize) + return ResultFs.InvalidIndirectStorageBucketTreeSize.Log(); + + using var nodeStorage = new ValueSubStorage(tableStorage, nodeStorageOffset, nodeStorageSize); + using var entryStorage = new ValueSubStorage(tableStorage, entryStorageOffset, entryStorageSize); + + return Initialize(allocator, in nodeStorage, in entryStorage, header.EntryCount); + } + + public Result Initialize(MemoryResource allocator, in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, + int entryCount) + { + return _table.Initialize(allocator, in nodeStorage, in entryStorage, NodeSize, Unsafe.SizeOf(), + entryCount); + } + + public void FinalizeObject() + { + if (IsInitialized()) + { + _table.FinalizeObject(); + + Span storages = _dataStorage.Items; + for (int i = 0; i < storages.Length; i++) + { + using var emptySubStorage = new ValueSubStorage(); + storages[i].Set(in emptySubStorage); + } + } + } + + protected override Result DoRead(long offset, Span destination) { // Validate pre-conditions Assert.SdkRequiresLessEqual(0, offset); @@ -160,23 +179,24 @@ public class IndirectStorage : IStorage if (destination.Length == 0) return Result.Success; - // Pin and recreate the span because C# can't use byref-like types in a closure - int bufferSize = destination.Length; - fixed (byte* pBuffer = destination) + var closure = new OperatePerEntryClosure(); + closure.OutBuffer = destination; + closure.Offset = offset; + + Result rc = OperatePerEntry(offset, destination.Length, ReadImpl, ref closure, enableContinuousReading: true, + verifyEntryRanges: true); + if (rc.IsFailure()) return rc.Miss(); + + return Result.Success; + + static Result ReadImpl(ref ValueSubStorage storage, long physicalOffset, long virtualOffset, long processSize, + ref OperatePerEntryClosure closure) { - // Copy the pointer to workaround CS1764. 
- // OperatePerEntry won't store the delegate anywhere, so it should be safe - byte* pBuffer2 = pBuffer; + int bufferPosition = (int)(virtualOffset - closure.Offset); + Result rc = storage.Read(physicalOffset, closure.OutBuffer.Slice(bufferPosition, (int)processSize)); + if (rc.IsFailure()) return rc.Miss(); - Result Operate(IStorage storage, long dataOffset, long currentOffset, long currentSize) - { - var buffer = new Span(pBuffer2, bufferSize); - - return storage.Read(dataOffset, - buffer.Slice((int)(currentOffset - offset), (int)currentSize)); - } - - return OperatePerEntry(offset, destination.Length, Operate); + return Result.Success; } } @@ -190,20 +210,169 @@ public class IndirectStorage : IStorage return Result.Success; } + protected override Result DoGetSize(out long size) + { + UnsafeHelpers.SkipParamInit(out size); + + Result rc = _table.GetOffsets(out BucketTree.Offsets offsets); + if (rc.IsFailure()) return rc.Miss(); + + size = offsets.EndOffset; + return Result.Success; + } + protected override Result DoSetSize(long size) { return ResultFs.UnsupportedSetSizeForIndirectStorage.Log(); } - protected override Result DoGetSize(out long size) + public Result GetEntryList(Span entryBuffer, out int outputEntryCount, long offset, long size) { - size = Table.GetEnd(); + UnsafeHelpers.SkipParamInit(out outputEntryCount); + + // Validate pre-conditions + Assert.SdkRequiresLessEqual(0, offset); + Assert.SdkRequiresLessEqual(0, size); + Assert.SdkRequires(IsInitialized()); + + // Succeed if there's no range + if (size == 0) + { + outputEntryCount = 0; + return Result.Success; + } + + // Check that our range is valid + Result rc = _table.GetOffsets(out BucketTree.Offsets offsets); + if (rc.IsFailure()) return rc.Miss(); + + if (!offsets.IsInclude(offset, size)) + return ResultFs.OutOfRange.Log(); + + + // Find the offset in our tree + using var visitor = new BucketTree.Visitor(); + + rc = _table.Find(ref visitor.Ref, offset); + if (rc.IsFailure()) return rc.Miss(); + + long entryOffset = visitor.Get().GetVirtualOffset(); + if (entryOffset < 0 || !offsets.IsInclude(entryOffset)) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Prepare to loop over entries + long endOffset = offset + size; + int count = 0; + + var currentEntry = visitor.Get(); + while (currentEntry.GetVirtualOffset() < endOffset) + { + // Try to write the entry to the out list + if (entryBuffer.Length != 0) + { + if (count >= entryBuffer.Length) + break; + + entryBuffer[count] = currentEntry; + } + + count++; + + // Advance + if (!visitor.CanMoveNext()) + break; + + rc = visitor.MoveNext(); + if (rc.IsFailure()) return rc; + + currentEntry = visitor.Get(); + } + + outputEntryCount = count; return Result.Success; } - private delegate Result OperateFunc(IStorage storage, long dataOffset, long currentOffset, long currentSize); + protected override Result DoOperateRange(Span outBuffer, OperationId operationId, long offset, long size, + ReadOnlySpan inBuffer) + { + Assert.SdkRequiresLessEqual(0, offset); + Assert.SdkRequiresLessEqual(0, size); + Assert.SdkRequires(IsInitialized()); - private Result OperatePerEntry(long offset, long size, OperateFunc func) + switch (operationId) + { + case OperationId.InvalidateCache: + if (!_table.IsEmpty()) + { + Result rc = _table.InvalidateCache(); + if (rc.IsFailure()) return rc.Miss(); + + for (int i = 0; i < _dataStorage.Items.Length; i++) + { + rc = _dataStorage.Items[i].OperateRange(OperationId.InvalidateCache, 0, long.MaxValue); + if (rc.IsFailure()) return rc.Miss(); + } + } + 
break; + case OperationId.QueryRange: + if (outBuffer.Length != Unsafe.SizeOf()) + return ResultFs.InvalidArgument.Log(); + + if (size > 0) + { + Result rc = _table.GetOffsets(out BucketTree.Offsets offsets); + if (rc.IsFailure()) return rc.Miss(); + + if (!offsets.IsInclude(offset, size)) + return ResultFs.OutOfRange.Log(); + + if (!_table.IsEmpty()) + { + var closure = new OperatePerEntryClosure(); + closure.OperationId = operationId; + closure.InBuffer = inBuffer; + + static Result QueryRangeImpl(ref ValueSubStorage storage, long physicalOffset, + long virtualOffset, long processSize, ref OperatePerEntryClosure closure) + { + Unsafe.SkipInit(out QueryRangeInfo currentInfo); + Result rc = storage.OperateRange(SpanHelpers.AsByteSpan(ref currentInfo), + closure.OperationId, physicalOffset, processSize, closure.InBuffer); + if (rc.IsFailure()) return rc.Miss(); + + closure.InfoMerged.Merge(in currentInfo); + return Result.Success; + } + + rc = OperatePerEntry(offset, size, QueryRangeImpl, ref closure, enableContinuousReading: false, + verifyEntryRanges: true); + if (rc.IsFailure()) return rc.Miss(); + + SpanHelpers.AsByteSpan(ref closure.InfoMerged).CopyTo(outBuffer); + } + } + break; + default: + return ResultFs.UnsupportedOperateRangeForIndirectStorage.Log(); + } + + return Result.Success; + } + + protected delegate Result OperatePerEntryFunc(ref ValueSubStorage storage, long physicalOffset, long virtualOffset, + long processSize, ref OperatePerEntryClosure closure); + + protected ref struct OperatePerEntryClosure + { + public Span OutBuffer; + public ReadOnlySpan InBuffer; + public long Offset; + public OperationId OperationId; + public QueryRangeInfo InfoMerged; + } + + protected Result OperatePerEntry(long offset, long size, OperatePerEntryFunc func, + ref OperatePerEntryClosure closure, bool enableContinuousReading, bool verifyEntryRanges) { // Validate preconditions Assert.SdkRequiresLessEqual(0, offset); @@ -215,94 +384,146 @@ public class IndirectStorage : IStorage return Result.Success; // Validate arguments - if (!Table.Includes(offset, size)) + Result rc = _table.GetOffsets(out BucketTree.Offsets offsets); + if (rc.IsFailure()) return rc.Miss(); + + if (!offsets.IsInclude(offset, size)) return ResultFs.OutOfRange.Log(); // Find the offset in our tree var visitor = new BucketTree.Visitor(); - try - { - Result rc = Table.Find(ref visitor, offset); - if (rc.IsFailure()) return rc; + rc = _table.Find(ref visitor, offset); + if (rc.IsFailure()) return rc; - long entryOffset = visitor.Get().GetVirtualOffset(); - if (entryOffset < 0 || !Table.Includes(entryOffset)) + long entryOffset = visitor.Get().GetVirtualOffset(); + if (entryOffset < 0 || !offsets.IsInclude(entryOffset)) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Prepare to operate in chunks + long currentOffset = offset; + long endOffset = offset + size; + var continuousReading = new BucketTree.ContinuousReadingInfo(); + + while (currentOffset < endOffset) + { + // Get the current entry + var currentEntry = visitor.Get(); + + // Get and validate the entry's offset + long currentEntryOffset = currentEntry.GetVirtualOffset(); + if (currentEntryOffset > currentOffset) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Validate the storage index + if (currentEntry.StorageIndex < 0 || currentEntry.StorageIndex >= StorageCount) return ResultFs.InvalidIndirectEntryStorageIndex.Log(); - // Prepare to operate in chunks - long currentOffset = offset; - long endOffset = offset + size; - - while (currentOffset < 
endOffset) + if (enableContinuousReading) { - // Get the current entry - var currentEntry = visitor.Get(); - - // Get and validate the entry's offset - long currentEntryOffset = currentEntry.GetVirtualOffset(); - if (currentEntryOffset > currentOffset) - return ResultFs.InvalidIndirectEntryOffset.Log(); - - // Validate the storage index - if (currentEntry.StorageIndex < 0 || currentEntry.StorageIndex >= StorageCount) - return ResultFs.InvalidIndirectEntryStorageIndex.Log(); - - // todo: Implement continuous reading - - // Get and validate the next entry offset - long nextEntryOffset; - if (visitor.CanMoveNext()) + if (continuousReading.CheckNeedScan()) { - rc = visitor.MoveNext(); - if (rc.IsFailure()) return rc; - - nextEntryOffset = visitor.Get().GetVirtualOffset(); - if (!Table.Includes(nextEntryOffset)) - return ResultFs.InvalidIndirectEntryOffset.Log(); - } - else - { - nextEntryOffset = Table.GetEnd(); + rc = visitor.ScanContinuousReading(out continuousReading, currentOffset, + endOffset - currentOffset); + if (rc.IsFailure()) return rc.Miss(); } - if (currentOffset >= nextEntryOffset) - return ResultFs.InvalidIndirectEntryOffset.Log(); - - // Get the offset of the entry in the data we read - long dataOffset = currentOffset - currentEntryOffset; - long dataSize = nextEntryOffset - currentEntryOffset - dataOffset; - Assert.SdkLess(0, dataSize); - - // Determine how much is left - long remainingSize = endOffset - currentOffset; - long currentSize = Math.Min(remainingSize, dataSize); - Assert.SdkLessEqual(currentSize, size); - + if (continuousReading.CanDo()) { - SubStorage currentStorage = DataStorage[currentEntry.StorageIndex]; + if (currentEntry.StorageIndex != 0) + return ResultFs.InvalidIndirectStorageIndex.Log(); - // Get the current data storage's size. - rc = currentStorage.GetSize(out long currentDataStorageSize); - if (rc.IsFailure()) return rc; + long offsetInEntry = currentOffset - currentEntryOffset; + long entryStorageOffset = currentEntry.GetPhysicalOffset(); + long dataStorageOffset = entryStorageOffset + offsetInEntry; - // Ensure that we remain within range. 
- long currentEntryPhysicalOffset = currentEntry.GetPhysicalOffset(); + long continuousReadSize = continuousReading.GetReadSize(); - if (currentEntryPhysicalOffset < 0 || currentEntryPhysicalOffset > currentDataStorageSize) - return ResultFs.IndirectStorageCorrupted.Log(); + if (verifyEntryRanges) + { + rc = _dataStorage[0].GetSize(out long storageSize); + if (rc.IsFailure()) return rc.Miss(); - if (currentDataStorageSize < currentEntryPhysicalOffset + dataOffset + currentSize) - return ResultFs.IndirectStorageCorrupted.Log(); + // Ensure that we remain within range + if (entryStorageOffset < 0 || entryStorageOffset > storageSize) + return ResultFs.InvalidIndirectEntryOffset.Log(); - rc = func(currentStorage, currentEntryPhysicalOffset + dataOffset, currentOffset, currentSize); - if (rc.IsFailure()) return rc; + if (dataStorageOffset + continuousReadSize > storageSize) + return ResultFs.InvalidIndirectStorageSize.Log(); + } + + rc = func(ref _dataStorage[0], dataStorageOffset, currentOffset, continuousReadSize, ref closure); + if (rc.IsFailure()) return rc.Miss(); + + continuousReading.Done(); } - - currentOffset += currentSize; } + + // Get and validate the next entry offset + long nextEntryOffset; + if (visitor.CanMoveNext()) + { + rc = visitor.MoveNext(); + if (rc.IsFailure()) return rc; + + nextEntryOffset = visitor.Get().GetVirtualOffset(); + if (!offsets.IsInclude(nextEntryOffset)) + return ResultFs.InvalidIndirectEntryOffset.Log(); + } + else + { + nextEntryOffset = offsets.EndOffset; + } + + if (currentOffset >= nextEntryOffset) + return ResultFs.InvalidIndirectEntryOffset.Log(); + + // Get the offset of the data we need in the entry + long dataOffsetInEntry = currentOffset - currentEntryOffset; + long dataSize = nextEntryOffset - currentEntryOffset - dataOffsetInEntry; + Assert.SdkLess(0, dataSize); + + // Determine how much is left + long remainingSize = endOffset - currentOffset; + long processSize = Math.Min(remainingSize, dataSize); + Assert.SdkLessEqual(processSize, size); + + // Operate, if we need to + bool needsOperate; + if (!enableContinuousReading) + { + needsOperate = true; + } + else + { + needsOperate = !continuousReading.IsDone() || currentEntry.StorageIndex != 0; + } + + if (needsOperate) + { + long entryStorageOffset = currentEntry.GetPhysicalOffset(); + long dataStorageOffset = entryStorageOffset + dataOffsetInEntry; + + if (verifyEntryRanges) + { + rc = _dataStorage[currentEntry.StorageIndex].GetSize(out long storageSize); + if (rc.IsFailure()) return rc.Miss(); + + // Ensure that we remain within range + if (entryStorageOffset < 0 || entryStorageOffset > storageSize) + return ResultFs.IndirectStorageCorrupted.Log(); + + if (dataStorageOffset + processSize > storageSize) + return ResultFs.IndirectStorageCorrupted.Log(); + } + + rc = func(ref _dataStorage[currentEntry.StorageIndex], dataStorageOffset, currentOffset, processSize, + ref closure); + if (rc.IsFailure()) return rc.Miss(); + } + + currentOffset += processSize; } - finally { visitor.Dispose(); } return Result.Success; } diff --git a/src/LibHac/FsSystem/NcaUtils/Nca.cs b/src/LibHac/FsSystem/NcaUtils/Nca.cs index 026d5d97..05bb8412 100644 --- a/src/LibHac/FsSystem/NcaUtils/Nca.cs +++ b/src/LibHac/FsSystem/NcaUtils/Nca.cs @@ -293,11 +293,11 @@ public class Nca var relocationTableStorage = new SubStorage(patchStorage, patchInfo.RelocationTreeOffset, patchInfo.RelocationTreeSize); var cachedTableStorage = new CachedStorage(relocationTableStorage, IndirectStorage.NodeSize, 4, true); - var tableNodeStorage = 
new SubStorage(cachedTableStorage, 0, nodeStorageSize);
-        var tableEntryStorage = new SubStorage(cachedTableStorage, nodeStorageSize, entryStorageSize);
+        using var tableNodeStorage = new ValueSubStorage(cachedTableStorage, 0, nodeStorageSize);
+        using var tableEntryStorage = new ValueSubStorage(cachedTableStorage, nodeStorageSize, entryStorageSize);
 
         var storage = new IndirectStorage();
-        storage.Initialize(tableNodeStorage, tableEntryStorage, treeHeader.EntryCount).ThrowIfFailure();
+        storage.Initialize(new ArrayPoolMemoryResource(), in tableNodeStorage, in tableEntryStorage, treeHeader.EntryCount).ThrowIfFailure();
 
         storage.SetStorage(0, baseStorage, 0, baseSize);
         storage.SetStorage(1, patchStorage, 0, patchSize);
diff --git a/src/LibHac/FsSystem/SparseStorage.cs b/src/LibHac/FsSystem/SparseStorage.cs
new file mode 100644
index 00000000..7eef97dd
--- /dev/null
+++ b/src/LibHac/FsSystem/SparseStorage.cs
@@ -0,0 +1,132 @@
+using System;
+using LibHac.Diag;
+using LibHac.Fs;
+
+namespace LibHac.FsSystem;
+
+/// <summary>
+/// Represents a sparse <see cref="IStorage"/> where blocks of empty data containing all
+/// zeros are not written to disk in order to save space.
+/// </summary>
+/// <remarks><para>The <see cref="SparseStorage"/>'s <see cref="BucketTree"/> contains
+/// values describing which portions of the storage are empty. This is accomplished by using a standard
+/// <see cref="IndirectStorage"/> where the second storage contains only zeros.</para>
+/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
+public class SparseStorage : IndirectStorage
+{
+    private class ZeroStorage : IStorage
+    {
+        protected override Result DoRead(long offset, Span<byte> destination)
+        {
+            Assert.SdkRequiresGreaterEqual(offset, 0);
+
+            if (destination.Length > 0)
+                destination.Clear();
+
+            return Result.Success;
+        }
+
+        protected override Result DoWrite(long offset, ReadOnlySpan<byte> source)
+        {
+            return ResultFs.UnsupportedWriteForZeroStorage.Log();
+        }
+
+        protected override Result DoFlush()
+        {
+            return Result.Success;
+        }
+
+        protected override Result DoGetSize(out long size)
+        {
+            size = long.MaxValue;
+            return Result.Success;
+        }
+
+        protected override Result DoSetSize(long size)
+        {
+            return ResultFs.UnsupportedSetSizeForZeroStorage.Log();
+        }
+
+        protected override Result DoOperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
+            ReadOnlySpan<byte> inBuffer)
+        {
+            return Result.Success;
+        }
+    }
+
+    private ZeroStorage _zeroStorage;
+
+    public SparseStorage()
+    {
+        _zeroStorage = new ZeroStorage();
+    }
+
+    public override void Dispose()
+    {
+        _zeroStorage.Dispose();
+        base.Dispose();
+    }
+
+    public void Initialize(long size)
+    {
+        GetEntryTable().Initialize(NodeSize, size);
+        SetZeroStorage();
+    }
+
+    public void SetDataStorage(in ValueSubStorage storage)
+    {
+        Assert.SdkRequires(IsInitialized());
+
+        SetStorage(0, in storage);
+        SetZeroStorage();
+    }
+
+    private void SetZeroStorage()
+    {
+        SetStorage(1, _zeroStorage, 0, long.MaxValue);
+    }
+
+    protected override Result DoRead(long offset, Span<byte> destination)
+    {
+        // Validate pre-conditions
+        Assert.SdkRequiresLessEqual(0, offset);
+        Assert.SdkRequires(IsInitialized());
+
+        // Succeed if there's nothing to read
+        if (destination.Length == 0)
+            return Result.Success;
+
+        if (GetEntryTable().IsEmpty())
+        {
+            Result rc = GetEntryTable().GetOffsets(out BucketTree.Offsets offsets);
+            if (rc.IsFailure()) return rc.Miss();
+
+            if (!offsets.IsInclude(offset, destination.Length))
+                return ResultFs.OutOfRange.Log();
+
+            destination.Clear();
+        }
+        else
+        {
+            var closure = new OperatePerEntryClosure();
+            closure.OutBuffer = destination;
+            closure.Offset = offset;
+
+            Result rc = OperatePerEntry(offset, 
destination.Length, ReadImpl, ref closure, + enableContinuousReading: false, verifyEntryRanges: true); + if (rc.IsFailure()) return rc.Miss(); + } + + return Result.Success; + + static Result ReadImpl(ref ValueSubStorage storage, long physicalOffset, long virtualOffset, long processSize, + ref OperatePerEntryClosure closure) + { + int bufferPosition = (int)(virtualOffset - closure.Offset); + Result rc = storage.Read(physicalOffset, closure.OutBuffer.Slice(bufferPosition, (int)processSize)); + if (rc.IsFailure()) return rc.Miss(); + + return Result.Success; + } + } +} \ No newline at end of file diff --git a/tests/LibHac.Tests/FsSystem/BucketTreeBuilderTests.cs b/tests/LibHac.Tests/FsSystem/BucketTreeBuilderTests.cs index 7ad213fc..f64296fa 100644 --- a/tests/LibHac.Tests/FsSystem/BucketTreeBuilderTests.cs +++ b/tests/LibHac.Tests/FsSystem/BucketTreeBuilderTests.cs @@ -117,15 +117,15 @@ public class BucketTreeBuilderTests const int nodeSize = 0x4000; const int entryCount = 10; - byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()]; - byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; - byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()]; + byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length); using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length); using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length); - var builder = new BucketTree2.Builder(); + var builder = new BucketTree.Builder(); Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf(), entryCount)); @@ -141,15 +141,15 @@ public class BucketTreeBuilderTests const int nodeSize = 0x4000; const int entryCount = 2; - byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()]; - byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; - byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()]; + byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length); using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length); using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length); - var builder = new BucketTree2.Builder(); + var builder = new BucketTree.Builder(); Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf(), entryCount)); diff --git a/tests/LibHac.Tests/FsSystem/BucketTreeCreator.cs b/tests/LibHac.Tests/FsSystem/BucketTreeCreator.cs index e31039fb..2dcb4be3 100644 --- 
a/tests/LibHac.Tests/FsSystem/BucketTreeCreator.cs +++ b/tests/LibHac.Tests/FsSystem/BucketTreeCreator.cs @@ -88,16 +88,16 @@ internal static class BucketTreeCreator public static BucketTreeTests.BucketTreeData Create(ulong rngSeed, SizeRange entrySizes, int nodeSize, int entryCount) { - byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()]; - byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; - byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()]; + byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; + byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf(), entryCount)]; using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length); using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length); using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length); var generator = new EntryGenerator(rngSeed, entrySizes); - var builder = new BucketTree2.Builder(); + var builder = new BucketTree.Builder(); Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf(), entryCount)); diff --git a/tests/LibHac.Tests/FsSystem/BucketTreeTests.cs b/tests/LibHac.Tests/FsSystem/BucketTreeTests.cs index 8f79a820..988fe454 100644 --- a/tests/LibHac.Tests/FsSystem/BucketTreeTests.cs +++ b/tests/LibHac.Tests/FsSystem/BucketTreeTests.cs @@ -60,7 +60,7 @@ public class BucketTreeTests : IClassFixture public byte[] Nodes; public byte[] Entries; - public BucketTree2 CreateBucketTree() + public BucketTree CreateBucketTree() { int entrySize = Unsafe.SizeOf(); @@ -68,7 +68,7 @@ public class BucketTreeTests : IClassFixture using var nodeStorage = new ValueSubStorage(new MemoryStorage(Nodes), 0, Nodes.Length); using var entryStorage = new ValueSubStorage(new MemoryStorage(Entries), 0, Entries.Length); - var tree = new BucketTree2(); + var tree = new BucketTree(); Assert.Success(tree.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, NodeSize, entrySize, header.EntryCount)); return tree; @@ -79,9 +79,9 @@ public class BucketTreeTests : IClassFixture private void MoveNext_IterateAllFromStart_ReturnsCorrectEntries(int treeIndex) { ReadOnlySpan entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount); - BucketTree2 tree = _treeData[treeIndex].CreateBucketTree(); + BucketTree tree = _treeData[treeIndex].CreateBucketTree(); - using var visitor = new BucketTree2.Visitor(); + using var visitor = new BucketTree.Visitor(); Assert.Success(tree.Find(ref visitor.Ref, 0)); for (int i = 0; i < entries.Length; i++) @@ -118,9 +118,9 @@ public class BucketTreeTests : IClassFixture private void MovePrevious_IterateAllFromEnd_ReturnsCorrectEntries(int treeIndex) { ReadOnlySpan entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount); - BucketTree2 tree = _treeData[treeIndex].CreateBucketTree(); + BucketTree tree = _treeData[treeIndex].CreateBucketTree(); - using var visitor = new BucketTree2.Visitor(); + using var visitor = new BucketTree.Visitor(); Assert.Success(tree.Find(ref visitor.Ref, entries[^1].GetVirtualOffset())); for (int i = entries.Length - 1; i >= 0; i--) @@ -158,7 +158,7 @@ public class BucketTreeTests : 
IClassFixture const int findCount = 10000; ReadOnlySpan entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount); - BucketTree2 tree = _treeData[treeIndex].CreateBucketTree(); + BucketTree tree = _treeData[treeIndex].CreateBucketTree(); var random = new Random(123456); @@ -170,7 +170,7 @@ public class BucketTreeTests : IClassFixture // Add a random shift amount to test finding offsets in the middle of an entry int offsetShift = random.Next(0, 1) * 0x500; - using var visitor = new BucketTree2.Visitor(); + using var visitor = new BucketTree.Visitor(); Assert.Success(tree.Find(ref visitor.Ref, expectedEntry.GetVirtualOffset() + offsetShift)); ref readonly IndirectStorage.Entry actualEntry = ref visitor.Get(); diff --git a/tests/LibHac.Tests/FsSystem/IndirectStorageCreator.cs b/tests/LibHac.Tests/FsSystem/IndirectStorageCreator.cs new file mode 100644 index 00000000..3aa13a8f --- /dev/null +++ b/tests/LibHac.Tests/FsSystem/IndirectStorageCreator.cs @@ -0,0 +1,154 @@ +using System; +using System.Runtime.CompilerServices; +using LibHac.Fs; +using LibHac.FsSystem; +using Xunit; + +namespace LibHac.Tests.FsSystem; + +internal class IndirectStorageCreator +{ + private const int NodeSize = 0x4000; + + private readonly ulong _rngSeed; + private readonly long _targetSize; + private readonly SizeRange _originalEntrySizeRange; + private readonly SizeRange _patchEntrySizeRange; + + private int _maxEntrySize; + private int _entryCount; + + private IndirectStorageTests.IndirectStorageData _buffers; + + public static IndirectStorageTests.IndirectStorageData Create(ulong rngSeed, SizeRange originalEntrySizeRange, + SizeRange patchEntrySizeRange, long storageSize) + { + return new IndirectStorageCreator(rngSeed, originalEntrySizeRange, patchEntrySizeRange, storageSize)._buffers; + } + + private IndirectStorageCreator(ulong rngSeed, SizeRange originalEntrySizeRange, SizeRange patchEntrySizeRange, long storageSize) + { + _rngSeed = rngSeed; + _originalEntrySizeRange = originalEntrySizeRange; + _patchEntrySizeRange = patchEntrySizeRange; + _targetSize = storageSize; + + CreateBuffers(); + FillBuffers(); + } + + private void CreateBuffers() + { + var generator = new BucketTreeCreator.EntryGenerator(_rngSeed, _originalEntrySizeRange, _patchEntrySizeRange); + generator.MoveNext(); + _maxEntrySize = 0; + + long originalSize = 0, patchSize = 0, sparseOriginalSize = 0; + + while (generator.PatchedStorageSize < _targetSize) + { + _maxEntrySize = Math.Max(_maxEntrySize, generator.CurrentEntrySize); + originalSize = generator.OriginalStorageSize; + patchSize = generator.PatchStorageSize; + sparseOriginalSize = originalSize - patchSize; + + generator.MoveNext(); + } + + _entryCount = generator.CurrentEntryIndex; + + _buffers = new() + { + OriginalStorageBuffer = new byte[originalSize], + SparseOriginalStorageBuffer = new byte[sparseOriginalSize], + PatchStorageBuffer = new byte[patchSize], + PatchedStorageBuffer = new byte[originalSize], + TableEntries = new IndirectStorage.Entry[_entryCount], + TableHeaderBuffer = new byte[BucketTree.QueryHeaderStorageSize()], + TableNodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf(), _entryCount)], + TableEntryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf(), _entryCount)], + SparseTableHeaderBuffer = new byte[BucketTree.QueryHeaderStorageSize()], + SparseTableNodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf(), _entryCount)], + SparseTableEntryBuffer = new 
byte[(int)BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf(), _entryCount)] + }; + } + + private void FillBuffers() + { + byte[] randomBuffer = new byte[_maxEntrySize]; + var generator = new BucketTreeCreator.EntryGenerator(_rngSeed, _originalEntrySizeRange, _patchEntrySizeRange); + + using var headerStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableHeaderBuffer), 0, _buffers.TableHeaderBuffer.Length); + using var nodeStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableNodeBuffer), 0, _buffers.TableNodeBuffer.Length); + using var entryStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableEntryBuffer), 0, _buffers.TableEntryBuffer.Length); + + using var sparseHeaderStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableHeaderBuffer), 0, _buffers.SparseTableHeaderBuffer.Length); + using var sparseNodeStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableNodeBuffer), 0, _buffers.SparseTableNodeBuffer.Length); + using var sparseEntryStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableEntryBuffer), 0, _buffers.SparseTableEntryBuffer.Length); + + var builder = new BucketTree.Builder(); + var sparseTableBuilder = new BucketTree.Builder(); + + Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, + in entryStorage, NodeSize, Unsafe.SizeOf(), _entryCount)); + + Assert.Success(sparseTableBuilder.Initialize(new ArrayPoolMemoryResource(), in sparseHeaderStorage, + in sparseNodeStorage, in sparseEntryStorage, NodeSize, Unsafe.SizeOf(), + _entryCount)); + + var random = new Random(_rngSeed); + + int originalStorageOffset = 0; + int sparseOriginalStorageOffset = 0; + int patchStorageOffset = 0; + int patchedStorageOffset = 0; + + for (int i = 0; i < _entryCount; i++) + { + generator.MoveNext(); + + IndirectStorage.Entry entry = generator.CurrentEntry; + + IndirectStorage.Entry sparseEntry = generator.CurrentEntry; + sparseEntry.SetPhysicalOffset(sparseOriginalStorageOffset); + + Assert.Success(builder.Add(in entry)); + Assert.Success(sparseTableBuilder.Add(in sparseEntry)); + + _buffers.TableEntries[i] = entry; + + Span randomData = randomBuffer.AsSpan(0, generator.CurrentEntrySize); + random.NextBytes(randomData); + + if (entry.StorageIndex == 0) + { + randomData.CopyTo(_buffers.OriginalStorageBuffer.AsSpan(originalStorageOffset)); + randomData.CopyTo(_buffers.SparseOriginalStorageBuffer.AsSpan(sparseOriginalStorageOffset)); + randomData.CopyTo(_buffers.PatchedStorageBuffer.AsSpan(patchedStorageOffset)); + + originalStorageOffset += randomData.Length; + sparseOriginalStorageOffset += randomData.Length; + patchedStorageOffset += randomData.Length; + } + else + { + // Fill the unused portions of the original storage with zeros so it matches the sparse original storage + _buffers.OriginalStorageBuffer.AsSpan(originalStorageOffset, generator.CurrentEntrySize); + randomData.CopyTo(_buffers.PatchStorageBuffer.AsSpan(patchStorageOffset)); + randomData.CopyTo(_buffers.PatchedStorageBuffer.AsSpan(patchedStorageOffset)); + + originalStorageOffset += randomData.Length; + patchStorageOffset += randomData.Length; + patchedStorageOffset += randomData.Length; + } + } + + Assert.Success(builder.Finalize(generator.PatchedStorageSize)); + Assert.Success(sparseTableBuilder.Finalize(generator.PatchedStorageSize)); + + Assert.Equal(_buffers.OriginalStorageBuffer.Length, originalStorageOffset); + Assert.Equal(_buffers.SparseOriginalStorageBuffer.Length, sparseOriginalStorageOffset); + 
Assert.Equal(_buffers.PatchStorageBuffer.Length, patchStorageOffset); + Assert.Equal(_buffers.PatchedStorageBuffer.Length, patchedStorageOffset); + } +} \ No newline at end of file diff --git a/tests/LibHac.Tests/FsSystem/IndirectStorageTests.cs b/tests/LibHac.Tests/FsSystem/IndirectStorageTests.cs new file mode 100644 index 00000000..77f2b8dc --- /dev/null +++ b/tests/LibHac.Tests/FsSystem/IndirectStorageTests.cs @@ -0,0 +1,356 @@ +using System; +using System.Linq; +using System.Runtime.InteropServices; +using LibHac.Fs; +using LibHac.FsSystem; +using LibHac.Tests.Common; +using LibHac.Tests.Fs; +using Xunit; + +namespace LibHac.Tests.FsSystem; + +public class IndirectStorageBuffers +{ + public IndirectStorageTests.IndirectStorageData[] Buffers { get; } + + public IndirectStorageBuffers() + { + IndirectStorageTests.IndirectStorageTestConfig[] storageConfig = IndirectStorageTests.IndirectStorageTestData; + Buffers = new IndirectStorageTests.IndirectStorageData[storageConfig.Length]; + + for (int i = 0; i < storageConfig.Length; i++) + { + IndirectStorageTests.IndirectStorageTestConfig config = storageConfig[i]; + + SizeRange patchSizeRange = config.PatchEntrySizeRange.BlockSize == 0 + ? config.OriginalEntrySizeRange + : config.PatchEntrySizeRange; + + Buffers[i] = IndirectStorageCreator.Create(config.RngSeed, config.OriginalEntrySizeRange, patchSizeRange, + config.StorageSize); + } + } +} + +public class IndirectStorageTests : IClassFixture +{ + // Keep the generated data between tests so it only has to be generated once + private readonly IndirectStorageData[] _storageBuffers; + + public IndirectStorageTests(IndirectStorageBuffers buffers) + { + _storageBuffers = buffers.Buffers; + } + + public class IndirectStorageTestConfig + { + public ulong RngSeed { get; init; } + public long StorageSize { get; init; } + + // If the patch size range is left blank, the same values will be used for both the original and patch entry sizes + public SizeRange OriginalEntrySizeRange { get; init; } + public SizeRange PatchEntrySizeRange { get; init; } + } + + private class RandomAccessTestConfig + { + public int[] SizeClassProbs { get; init; } + public int[] SizeClassMaxSizes { get; init; } + public int[] TaskProbs { get; init; } + public int[] AccessTypeProbs { get; init; } + public ulong RngSeed { get; init; } + public int FrequentAccessBlockCount { get; init; } + } + + public static readonly IndirectStorageTestConfig[] IndirectStorageTestData = + { + // Small patched regions to force continuous reading + new() + { + RngSeed = 948285, + OriginalEntrySizeRange = new SizeRange(0x10000, 1,5), + PatchEntrySizeRange = new SizeRange(1, 0x20, 0xFFF), + StorageSize = 1024 * 1024 * 10 + }, + // Small patch regions + new() + { + RngSeed = 236956, + OriginalEntrySizeRange = new SizeRange(0x1000, 1,10), + StorageSize = 1024 * 1024 * 10 + }, + // Medium patch regions + new() + { + RngSeed = 352174, + OriginalEntrySizeRange = new SizeRange(0x8000, 1,10), + StorageSize = 1024 * 1024 * 10 + }, + // Larger patch regions + new() + { + RngSeed = 220754, + OriginalEntrySizeRange = new SizeRange(0x10000, 10,50), + StorageSize = 1024 * 1024 * 10 + } + }; + + private static readonly RandomAccessTestConfig[] AccessTestConfigs = + { + new() + { + SizeClassProbs = new[] { 50, 50, 5 }, + SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB + TaskProbs = new[] { 1, 0, 0 }, // Read, Write, Flush + AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block + RngSeed = 35467, + 
FrequentAccessBlockCount = 6, + }, + new() + { + SizeClassProbs = new[] { 50, 50, 5 }, + SizeClassMaxSizes = new[] { 0x800, 0x1000, 0x8000 }, // 2 KB, 4 KB, 32 KB + TaskProbs = new[] { 1, 0, 0 }, // Read, Write, Flush + AccessTypeProbs = new[] { 1, 10, 0 }, // Random, Sequential, Frequent block + RngSeed = 13579 + }, + }; + + public static TheoryData IndirectStorageTestTheoryData = + TheoryDataCreator.CreateSequence(0, IndirectStorageTestData.Length); + + public class IndirectStorageData + { + public IndirectStorage.Entry[] TableEntries; + public byte[] TableHeaderBuffer; + public byte[] TableNodeBuffer; + public byte[] TableEntryBuffer; + + public byte[] SparseTableHeaderBuffer; + public byte[] SparseTableNodeBuffer; + public byte[] SparseTableEntryBuffer; + + public byte[] OriginalStorageBuffer; + public byte[] SparseOriginalStorageBuffer; + public byte[] PatchStorageBuffer; + public byte[] PatchedStorageBuffer; + + public IndirectStorage CreateIndirectStorage(bool useSparseOriginalStorage) + { + BucketTree.Header header = MemoryMarshal.Cast(TableHeaderBuffer)[0]; + + using var nodeStorage = new ValueSubStorage(new MemoryStorage(TableNodeBuffer), 0, TableNodeBuffer.Length); + using var entryStorage = new ValueSubStorage(new MemoryStorage(TableEntryBuffer), 0, TableEntryBuffer.Length); + + IStorage originalStorageBase = useSparseOriginalStorage ? CreateSparseStorage() : new MemoryStorage(OriginalStorageBuffer); + + using var originalStorage = new ValueSubStorage(originalStorageBase, 0, OriginalStorageBuffer.Length); + using var patchStorage = new ValueSubStorage(new MemoryStorage(PatchStorageBuffer), 0, PatchStorageBuffer.Length); + + var storage = new IndirectStorage(); + Assert.Success(storage.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, header.EntryCount)); + storage.SetStorage(0, in originalStorage); + storage.SetStorage(1, in patchStorage); + + return storage; + } + + public SparseStorage CreateSparseStorage() + { + BucketTree.Header header = MemoryMarshal.Cast(SparseTableHeaderBuffer)[0]; + + using var nodeStorage = new ValueSubStorage(new MemoryStorage(SparseTableNodeBuffer), 0, SparseTableNodeBuffer.Length); + using var entryStorage = new ValueSubStorage(new MemoryStorage(SparseTableEntryBuffer), 0, SparseTableEntryBuffer.Length); + + using var sparseOriginalStorage = new ValueSubStorage(new MemoryStorage(SparseOriginalStorageBuffer), 0, SparseOriginalStorageBuffer.Length); + + var sparseStorage = new SparseStorage(); + Assert.Success(sparseStorage.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, header.EntryCount)); + sparseStorage.SetDataStorage(in sparseOriginalStorage); + + return sparseStorage; + } + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void Read_EntireStorageInSingleRead_DataIsCorrect(int index) + { + ReadEntireStorageImpl(index, false); + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void Read_EntireStorageInSingleRead_OriginalStorageIsSparse_DataIsCorrect(int index) + { + ReadEntireStorageImpl(index, true); + } + + private void ReadEntireStorageImpl(int index, bool useSparseOriginalStorage) + { + using IndirectStorage storage = _storageBuffers[index].CreateIndirectStorage(useSparseOriginalStorage); + + byte[] expectedPatchedData = _storageBuffers[index].PatchedStorageBuffer; + byte[] actualPatchedData = new byte[expectedPatchedData.Length]; + + Assert.Success(storage.GetSize(out long storageSize)); + Assert.Equal(actualPatchedData.Length, storageSize); 
+ + Assert.Success(storage.Read(0, actualPatchedData)); + Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData)); + } + + [Fact] + public void Initialize_SingleTableStorage() + { + const int index = 1; + IndirectStorageData buffers = _storageBuffers[index]; + + byte[] tableBuffer = buffers.TableHeaderBuffer.Concat(buffers.TableNodeBuffer.Concat(buffers.TableEntryBuffer)).ToArray(); + using var tableStorage = new ValueSubStorage(new MemoryStorage(tableBuffer), 0, tableBuffer.Length); + + using var originalStorage = new ValueSubStorage(new MemoryStorage(buffers.OriginalStorageBuffer), 0, buffers.OriginalStorageBuffer.Length); + using var patchStorage = new ValueSubStorage(new MemoryStorage(buffers.PatchStorageBuffer), 0, buffers.PatchStorageBuffer.Length); + + using var storage = new IndirectStorage(); + Assert.Success(storage.Initialize(new ArrayPoolMemoryResource(), in tableStorage)); + storage.SetStorage(0, in originalStorage); + storage.SetStorage(1, in patchStorage); + + byte[] expectedPatchedData = _storageBuffers[index].PatchedStorageBuffer; + byte[] actualPatchedData = new byte[expectedPatchedData.Length]; + + Assert.Success(storage.GetSize(out long storageSize)); + Assert.Equal(actualPatchedData.Length, storageSize); + + Assert.Success(storage.Read(0, actualPatchedData)); + Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData)); + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void Read_RandomAccess_DataIsCorrect(int index) + { + foreach (RandomAccessTestConfig accessConfig in AccessTestConfigs) + { + StorageTester tester = SetupRandomAccessTest(index, accessConfig, true); + tester.Run(0x1000); + } + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void GetEntryList_GetAllEntries_ReturnsCorrectEntries(int index) + { + GetEntryListTestImpl(index, 0, _storageBuffers[index].PatchedStorageBuffer.Length); + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void GetEntryList_GetPartialEntries_ReturnsCorrectEntries(int index) + { + IndirectStorageData buffers = _storageBuffers[index]; + var random = new Random(IndirectStorageTestData[index].RngSeed); + + int endOffset = buffers.PatchedStorageBuffer.Length; + int maxSize = endOffset / 2; + const int testCount = 100; + + for (int i = 0; i < testCount; i++) + { + long offset = random.Next(0, endOffset); + long size = Math.Min(endOffset - offset, random.Next(0, maxSize)); + + GetEntryListTestImpl(index, offset, size); + } + + GetEntryListTestImpl(index, 0, _storageBuffers[index].PatchedStorageBuffer.Length); + } + + private void GetEntryListTestImpl(int index, long offset, long size) + { + Assert.True(size > 0); + + IndirectStorageData buffers = _storageBuffers[index]; + using IndirectStorage storage = buffers.CreateIndirectStorage(false); + IndirectStorage.Entry[] entries = buffers.TableEntries; + int endOffset = buffers.PatchedStorageBuffer.Length; + + int startIndex = FindEntry(entries, offset, endOffset); + int endIndex = FindEntry(entries, offset + size - 1, endOffset); + int count = endIndex - startIndex + 1; + + Span expectedEntries = buffers.TableEntries.AsSpan(startIndex, count); + var actualEntries = new IndirectStorage.Entry[expectedEntries.Length + 1]; + + Assert.Success(storage.GetEntryList(actualEntries, out int entryCount, offset, size)); + + Assert.Equal(expectedEntries.Length, entryCount); + Assert.True(actualEntries.AsSpan(0, entryCount).SequenceEqual(expectedEntries)); + } + + private int FindEntry(IndirectStorage.Entry[] 
entries, long offset, long endOffset) + { + Assert.True(offset >= 0); + Assert.True(offset < endOffset); + + for (int i = 0; i + 1 < entries.Length; i++) + { + if (offset >= entries[i].GetVirtualOffset() && offset < entries[i + 1].GetVirtualOffset()) + return i; + } + + return entries.Length - 1; + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void SparseStorage_Read_EntireStorageInSingleRead_DataIsCorrect(int index) + { + IndirectStorageData buffers = _storageBuffers[index]; + using SparseStorage storage = buffers.CreateSparseStorage(); + + byte[] expectedPatchedData = buffers.OriginalStorageBuffer; + byte[] actualPatchedData = new byte[expectedPatchedData.Length]; + + Assert.Success(storage.GetSize(out long storageSize)); + Assert.Equal(actualPatchedData.Length, storageSize); + + Assert.Success(storage.Read(0, actualPatchedData)); + Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData)); + } + + [Theory, MemberData(nameof(IndirectStorageTestTheoryData))] + public void SparseStorage_Read_RandomAccess_DataIsCorrect(int index) + { + foreach (RandomAccessTestConfig accessConfig in AccessTestConfigs) + { + StorageTester tester = SetupRandomAccessTest(index, accessConfig, true); + tester.Run(0x1000); + } + } + + private StorageTester SetupRandomAccessTest(int storageConfigIndex, RandomAccessTestConfig accessConfig, bool getSparseStorage) + { + IStorage indirectStorage = getSparseStorage + ? _storageBuffers[storageConfigIndex].CreateSparseStorage() + : _storageBuffers[storageConfigIndex].CreateIndirectStorage(false); + + Assert.Success(indirectStorage.GetSize(out long storageSize)); + + byte[] expectedStorageArray = new byte[storageSize]; + Assert.Success(indirectStorage.Read(0, expectedStorageArray)); + + var memoryStorage = new MemoryStorage(expectedStorageArray); + + var memoryStorageEntry = new StorageTester.Entry(memoryStorage, expectedStorageArray); + var indirectStorageEntry = new StorageTester.Entry(indirectStorage, expectedStorageArray); + + var testerConfig = new StorageTester.Configuration() + { + Entries = new[] { memoryStorageEntry, indirectStorageEntry }, + SizeClassProbs = accessConfig.SizeClassProbs, + SizeClassMaxSizes = accessConfig.SizeClassMaxSizes, + TaskProbs = accessConfig.TaskProbs, + AccessTypeProbs = accessConfig.AccessTypeProbs, + RngSeed = accessConfig.RngSeed, + FrequentAccessBlockCount = accessConfig.FrequentAccessBlockCount + }; + + return new StorageTester(testerConfig); + } +} \ No newline at end of file
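
Usage sketch (illustrative, based on the test code above rather than on the patch itself): the new initialization surface takes a MemoryResource plus ValueSubStorage wrappers, data storages are attached with SetStorage, and SparseStorage wires its zero storage through SetDataStorage. The helper class, method names, buffer parameters, and the exact using directives below are assumptions; the buffers are expected to hold a valid bucket-tree table and the matching data, e.g. as produced by IndirectStorageCreator.

using System;
using LibHac;          // Result (ArrayPoolMemoryResource may live in a different namespace)
using LibHac.Fs;       // IStorage, MemoryStorage, ValueSubStorage
using LibHac.FsSystem; // IndirectStorage, SparseStorage

public static class IndirectStorageUsageSketch
{
    // Builds an IndirectStorage from a combined bucket-tree table buffer
    // (header + nodes + entries) and reads the patched view into 'destination'.
    public static Result ReadPatched(byte[] tableBuffer, byte[] originalBuffer, byte[] patchBuffer,
        Span<byte> destination)
    {
        using var tableStorage = new ValueSubStorage(new MemoryStorage(tableBuffer), 0, tableBuffer.Length);
        using var originalStorage = new ValueSubStorage(new MemoryStorage(originalBuffer), 0, originalBuffer.Length);
        using var patchStorage = new ValueSubStorage(new MemoryStorage(patchBuffer), 0, patchBuffer.Length);

        using var storage = new IndirectStorage();

        // The single-storage overload reads the BucketTree header itself and slices out
        // the node and entry regions, as in Initialize_SingleTableStorage above.
        Result rc = storage.Initialize(new ArrayPoolMemoryResource(), in tableStorage);
        if (rc.IsFailure()) return rc;

        storage.SetStorage(0, in originalStorage); // index 0: original data
        storage.SetStorage(1, in patchStorage);    // index 1: patch data

        return storage.Read(0, destination);
    }

    // Builds a SparseStorage from separate node/entry buffers plus the packed data,
    // mirroring CreateSparseStorage above; entryCount comes from the table's BucketTree header.
    public static Result ReadSparse(byte[] nodeBuffer, byte[] entryBuffer, byte[] dataBuffer,
        int entryCount, Span<byte> destination)
    {
        using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
        using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
        using var dataStorage = new ValueSubStorage(new MemoryStorage(dataBuffer), 0, dataBuffer.Length);

        using var sparse = new SparseStorage();

        Result rc = sparse.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, entryCount);
        if (rc.IsFailure()) return rc;

        // Storage index 0 holds the real data; SetDataStorage also wires index 1 to the internal ZeroStorage.
        sparse.SetDataStorage(in dataStorage);

        return sparse.Read(0, destination);
    }
}

As in the tests, the ValueSubStorage locals are passed by 'in' and SetStorage stores its own copy via _dataStorage[index].Set(in storage), so the temporaries can go out of scope once the storage has been wired up.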