Update BucketTree for 13.0

Alex Barney 2021-11-27 14:10:19 -07:00
parent 771f2cdb26
commit f180bfeef9
11 changed files with 2278 additions and 157 deletions

View File

@@ -71,7 +71,20 @@ public abstract class IStorage : IDisposable
}
/// <summary>
/// Performs various operations on the file. Used to extend the functionality of the <see cref="IStorage"/> interface.
/// Performs various operations on the storage. Used to extend the functionality of the <see cref="IStorage"/> interface.
/// </summary>
/// <param name="operationId">The operation to be performed.</param>
/// <param name="offset">The offset of the range to operate on.</param>
/// <param name="size">The size of the range to operate on.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public Result OperateRange(OperationId operationId, long offset, long size)
{
return DoOperateRange(Span<byte>.Empty, operationId, offset, size, ReadOnlySpan<byte>.Empty);
}
/// <summary>
/// Performs various operations on the storage. Used to extend the functionality of the <see cref="IStorage"/> interface.
/// </summary>
/// <param name="outBuffer">A buffer that will contain the response from the operation.</param>
/// <param name="operationId">The operation to be performed.</param>

View File

@@ -133,16 +133,10 @@ public partial class BucketTree
return visitor.Find(virtualAddress);
}
public static int QueryHeaderStorageSize() => Unsafe.SizeOf<Header>();
public static int QueryHeaderStorageSize() => 16;
public static long QueryNodeStorageSize(long nodeSize, long entrySize, int entryCount)
{
Assert.SdkRequiresLessEqual(sizeof(long), entrySize);
Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf<NodeHeader>(), nodeSize);
Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax);
Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize));
Assert.SdkRequiresLessEqual(0, entryCount);
if (entryCount <= 0)
return 0;
@@ -151,12 +145,6 @@ public partial class BucketTree
public static long QueryEntryStorageSize(long nodeSize, long entrySize, int entryCount)
{
Assert.SdkRequiresLessEqual(sizeof(long), entrySize);
Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf<NodeHeader>(), nodeSize);
Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax);
Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize));
Assert.SdkRequiresLessEqual(0, entryCount);
if (entryCount <= 0)
return 0;
@@ -165,18 +153,19 @@ public partial class BucketTree
private static int GetEntryCount(long nodeSize, long entrySize)
{
return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / entrySize);
return (int)((nodeSize - 16) / entrySize);
}
private static int GetOffsetCount(long nodeSize)
{
return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / sizeof(long));
return (int)((nodeSize - 16) / sizeof(long));
}
private static int GetEntrySetCount(long nodeSize, long entrySize, int entryCount)
{
int entryCountPerNode = GetEntryCount(nodeSize, entrySize);
return BitUtil.DivideUp(entryCount, entryCountPerNode);
uint divisor = (uint)entryCountPerNode;
return (int)(((uint)entryCount + divisor - 1) / divisor);
}
public static int GetNodeL2Count(long nodeSize, long entrySize, int entryCount)
@@ -187,10 +176,12 @@ public partial class BucketTree
if (entrySetCount <= offsetCountPerNode)
return 0;
int nodeL2Count = BitUtil.DivideUp(entrySetCount, offsetCountPerNode);
Abort.DoAbortUnless(nodeL2Count <= offsetCountPerNode);
uint divisor1 = (uint)offsetCountPerNode;
int nodeL2Count = (int)(((uint)entrySetCount + divisor1 - 1) / divisor1);
Assert.SdkLessEqual(nodeL2Count, offsetCountPerNode);
return BitUtil.DivideUp(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1)), offsetCountPerNode);
uint divisor = (uint)offsetCountPerNode;
return (int)(((uint)(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1))) + divisor - 1) / divisor);
}
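The changes above inline BitUtil.DivideUp as unsigned ceiling division. For reference, a standalone sketch of the identity being relied on, with a worked example using values that match the tests later in this commit:

// Ceiling division: for divisor > 0, (x + d - 1) / d == ceil(x / d).
// Doing the math in uint avoids sign handling, matching the inlined code.
static int DivideUp(int value, int divisor)
{
    uint d = (uint)divisor;
    return (int)(((uint)value + d - 1) / d);
}

// Example: a 0x4000-byte node holds (0x4000 - 16) / 20 = 818 twenty-byte
// entries per entry set, so 10000 entries need DivideUp(10000, 818) = 13 sets.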
private static long GetBucketTreeEntryOffset(long entrySetOffset, long entrySize, int entryIndex)
@@ -298,7 +289,7 @@ public partial class BucketTree
public ref NodeHeader GetHeader()
{
Assert.SdkRequiresGreaterEqual(_header.Length / sizeof(long), Unsafe.SizeOf<NodeHeader>());
Assert.SdkRequiresGreaterEqual(_header.Length * sizeof(long), Unsafe.SizeOf<NodeHeader>());
return ref Unsafe.As<long, NodeHeader>(ref _header[0]);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,314 @@
using System;
using System.Buffers.Binary;
using System.Runtime.CompilerServices;
using LibHac.Common;
using LibHac.Diag;
using LibHac.Fs;
using LibHac.Util;
namespace LibHac.FsSystem;
public partial class BucketTree2
{
public class Builder
{
private NodeBuffer _l1Node;
private NodeBuffer _l2Node;
private NodeBuffer _entrySet;
private ValueSubStorage _nodeStorage;
private ValueSubStorage _entryStorage;
private int _nodeSize;
private int _entrySize;
private int _entryCount;
private int _entriesPerEntrySet;
private int _offsetsPerNode;
private int _currentL2OffsetIndex;
private int _currentEntryIndex;
private long _currentOffset;
public Builder()
{
_currentOffset = -1;
}
/// <summary>
/// Initializes the bucket tree builder.
/// </summary>
/// <param name="allocator">The <see cref="MemoryResource"/> to use for buffer allocation.</param>
/// <param name="headerStorage">The <see cref="ValueSubStorage"/> the tree's header will be written to.Must be at least the size in bytes returned by <see cref="QueryHeaderStorageSize"/>.</param>
/// <param name="nodeStorage">The <see cref="ValueSubStorage"/> the tree's nodes will be written to. Must be at least the size in bytes returned by <see cref="QueryNodeStorageSize"/>.</param>
/// <param name="entryStorage">The <see cref="ValueSubStorage"/> the tree's entries will be written to. Must be at least the size in bytes returned by <see cref="QueryEntryStorageSize"/>.</param>
/// <param name="nodeSize">The size of each node in the bucket tree. Must be a power of 2.</param>
/// <param name="entrySize">The size of each entry that will be stored in the bucket tree.</param>
/// <param name="entryCount">The exact number of entries that will be added to the bucket tree.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Initialize(MemoryResource allocator, in ValueSubStorage headerStorage,
in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int nodeSize, int entrySize,
int entryCount)
{
Assert.NotNull(allocator);
Assert.SdkRequiresLessEqual(sizeof(long), entrySize);
Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf<NodeHeader>(), nodeSize);
Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax);
Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize));
// Set the builder parameters
_nodeSize = nodeSize;
_entrySize = entrySize;
_entryCount = entryCount;
_entriesPerEntrySet = GetEntryCount(nodeSize, entrySize);
_offsetsPerNode = GetOffsetCount(nodeSize);
_currentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount);
// Create and write the header
var header = new Header();
header.Format(entryCount);
Result rc = headerStorage.Write(0, SpanHelpers.AsByteSpan(ref header));
if (rc.IsFailure()) return rc;
// Allocate buffers for the L1 node and entry sets
_l1Node.Allocate(allocator, nodeSize);
_entrySet.Allocate(allocator, nodeSize);
int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
// Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node
if (_offsetsPerNode < entrySetCount)
{
_l2Node.Allocate(allocator, nodeSize);
}
_l1Node.FillZero();
_l2Node.FillZero();
_entrySet.FillZero();
_nodeStorage.Set(in nodeStorage);
_entryStorage.Set(in entryStorage);
// Set the initial position
_currentEntryIndex = 0;
_currentOffset = -1;
return Result.Success;
}
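To make the derived parameters concrete, a worked example under assumed values: nodeSize 0x4000 and a 20-byte entry, which is what IndirectStorage.Entry works out to in the tests later in this commit.

// nodeSize = 0x4000, entrySize = 20, entryCount = 10000
// _entriesPerEntrySet = (0x4000 - 16) / 20 = 818
// _offsetsPerNode     = (0x4000 - 16) / 8  = 2046
// entrySetCount       = DivideUp(10000, 818) = 13
// 13 <= 2046, so every entry-set offset fits in the L1 node:
// GetNodeL2Count returns 0 and no L2 node buffer is allocated.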
/// <summary>
/// Adds a new entry to the bucket tree.
/// </summary>
/// <typeparam name="T">The type of the entry to add. Added entries should all be the same type.</typeparam>
/// <param name="entry">The entry to add.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Add<T>(in T entry) where T : unmanaged
{
Assert.SdkRequiresEqual(Unsafe.SizeOf<T>(), _entrySize);
if (_currentEntryIndex >= _entryCount)
return ResultFs.OutOfRange.Log();
// The entry offset must always be the first 8 bytes of the struct
long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsReadOnlyByteSpan(in entry));
if (entryOffset <= _currentOffset)
return ResultFs.InvalidOffset.Log();
Result rc = FinalizePreviousEntrySet(entryOffset);
if (rc.IsFailure()) return rc;
AddEntryOffset(entryOffset);
// Write the new entry
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
_entrySet.GetNode<T>().GetWritableArray()[indexInEntrySet] = entry;
_currentOffset = entryOffset;
_currentEntryIndex++;
return Result.Success;
}
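The first-8-bytes requirement above means any entry type must lead with its virtual offset. A minimal hypothetical entry type that satisfies it (illustrative; the real example used in this commit is IndirectStorage.Entry):

using System.Runtime.InteropServices;

// The long at offset 0 is what Add() reads via ReadInt64LittleEndian
// to keep the tree's entries ordered. Pack = 4 is an assumption that
// yields a 20-byte struct, consistent with the test arithmetic below.
[StructLayout(LayoutKind.Sequential, Pack = 4)]
struct MyEntry
{
    public long VirtualOffset;  // must be the first field
    public long PhysicalOffset;
    public int StorageIndex;
}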
/// <summary>
/// Checks if a new entry set is being started. If so, sets the end offset of the previous
/// entry set and writes it to the output storage.
/// </summary>
/// <param name="endOffset">The end offset of the previous entry.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
private Result FinalizePreviousEntrySet(long endOffset)
{
int prevEntrySetIndex = _currentEntryIndex / _entriesPerEntrySet - 1;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// If the previous Add finished an entry set
if (_currentEntryIndex > 0 && indexInEntrySet == 0)
{
// Set the end offset of that entry set
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
entrySetHeader.Index = prevEntrySetIndex;
entrySetHeader.EntryCount = _entriesPerEntrySet;
entrySetHeader.OffsetEnd = endOffset;
// Write the entry set to the entry storage
long storageOffset = (long)_nodeSize * prevEntrySetIndex;
Result rc = _entryStorage.Write(storageOffset, _entrySet.GetBuffer());
if (rc.IsFailure()) return rc;
// Clear the entry set buffer to begin the new entry set
_entrySet.FillZero();
// Check if we're writing in L2 nodes
if (_currentL2OffsetIndex > _offsetsPerNode)
{
int prevL2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode - 2;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
// If the previous Add finished an L2 node
if (indexInL2Node == 0)
{
// Set the end offset of that node
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
l2NodeHeader.Index = prevL2NodeIndex;
l2NodeHeader.EntryCount = _offsetsPerNode;
l2NodeHeader.OffsetEnd = endOffset;
// Write the L2 node to the node storage
long nodeOffset = (long)_nodeSize * (prevL2NodeIndex + 1);
rc = _nodeStorage.Write(nodeOffset, _l2Node.GetBuffer());
if (rc.IsFailure()) return rc;
// Clear the L2 node buffer to begin the new node
_l2Node.FillZero();
}
}
}
return Result.Success;
}
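The index arithmetic above reflects two details: _currentL2OffsetIndex has already advanced past the node being flushed, and slot 0 of the node storage belongs to the L1 node. A worked check under the assumed 0x400 node size and 20-byte entries used by the tests below:

// _offsetsPerNode = (0x400 - 16) / 8 = 126. Suppose the first L2 node
// has just been filled, so _currentL2OffsetIndex = 252:
//   prevL2NodeIndex = 252 / 126 - 2 = 0           -> first L2 node
//   nodeOffset      = _nodeSize * (0 + 1) = 0x400 -> slot 1, after the L1 node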
/// <summary>
/// If needed, adds a new entry set's start offset to the L1 or L2 nodes.
/// </summary>
/// <param name="entryOffset">The start offset of the entry being added.</param>
private void AddEntryOffset(long entryOffset)
{
int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// If we're starting a new entry set we need to add its start offset to the L1/L2 nodes
if (indexInEntrySet == 0)
{
Span<long> l1Data = _l1Node.GetNode<long>().GetWritableArray();
if (_currentL2OffsetIndex == 0)
{
// There are no L2 nodes. Write the entry set start offset directly to L1
l1Data[entrySetIndex] = entryOffset;
}
else
{
if (_currentL2OffsetIndex < _offsetsPerNode)
{
// The current L2 offset is stored in the L1 node
l1Data[_currentL2OffsetIndex] = entryOffset;
}
else
{
// Write the entry set offset to the current L2 node
int l2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
Span<long> l2Data = _l2Node.GetNode<long>().GetWritableArray();
l2Data[indexInL2Node] = entryOffset;
// If we're starting a new L2 node we need to add its start offset to the L1 node
if (indexInL2Node == 0)
{
l1Data[l2NodeIndex - 1] = entryOffset;
}
}
_currentL2OffsetIndex++;
}
}
}
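A worked example of the resulting offset layout, under the same assumed 0x400 node size and 20-byte entries:

// _entriesPerEntrySet = 50, _offsetsPerNode = 126.
// entryCount = 50_000 -> 1000 entry sets -> GetNodeL2Count(...) = 7,
// so _currentL2OffsetIndex starts at 7 and the layout becomes:
//   L1 slots 0..6    start offsets of the 7 L2 nodes
//   L1 slots 7..125  the first 119 entry-set offsets, stored directly
//   L2 nodes         the remaining 881 entry-set offsets, 126 per node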
/// <summary>
/// Finalizes the bucket tree. Must be called after all entries are added.
/// </summary>
/// <param name="endOffset">The end offset of the bucket tree.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Finalize(long endOffset)
{
// Finalize must only be called after all entries are added
if (_entryCount != _currentEntryIndex)
return ResultFs.OutOfRange.Log();
if (endOffset <= _currentOffset)
return ResultFs.InvalidOffset.Log();
if (_currentOffset == -1)
return Result.Success;
Result rc = FinalizePreviousEntrySet(endOffset);
if (rc.IsFailure()) return rc;
int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// Finalize the current entry set if needed
if (indexInEntrySet != 0)
{
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
entrySetHeader.Index = entrySetIndex;
entrySetHeader.EntryCount = indexInEntrySet;
entrySetHeader.OffsetEnd = endOffset;
long entryStorageOffset = (long)_nodeSize * entrySetIndex;
rc = _entryStorage.Write(entryStorageOffset, _entrySet.GetBuffer());
if (rc.IsFailure()) return rc;
}
int l2NodeIndex = BitUtil.DivideUp(_currentL2OffsetIndex, _offsetsPerNode) - 2;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
// Finalize the current L2 node if needed
if (_currentL2OffsetIndex > _offsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0))
{
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
l2NodeHeader.Index = l2NodeIndex;
l2NodeHeader.EntryCount = indexInL2Node != 0 ? indexInL2Node : _offsetsPerNode;
l2NodeHeader.OffsetEnd = endOffset;
long l2NodeStorageOffset = (long)_nodeSize * (l2NodeIndex + 1);
rc = _nodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer());
if (rc.IsFailure()) return rc;
}
// Finalize the L1 node
ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader();
l1NodeHeader.Index = 0;
l1NodeHeader.OffsetEnd = endOffset;
// L1 count depends on the existence or absence of L2 nodes
if (_currentL2OffsetIndex == 0)
{
l1NodeHeader.EntryCount = BitUtil.DivideUp(_currentEntryIndex, _entriesPerEntrySet);
}
else
{
l1NodeHeader.EntryCount = l2NodeIndex + 1;
}
rc = _nodeStorage.Write(0, _l1Node.GetBuffer());
if (rc.IsFailure()) return rc;
_currentOffset = long.MaxValue;
return Result.Success;
}
}
}
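Putting the Builder API together, a compact sketch of the intended call sequence. Storage setup and the entry type are elided here and the names are placeholders; BucketTreeCreator.Create in the tests below is a complete, real version.

// Illustrative flow; allocator, the three storages, nodeSize, entrySize,
// and entries are assumed to be prepared using the Query*StorageSize
// methods documented above.
var builder = new BucketTree2.Builder();

builder.Initialize(allocator, in headerStorage, in nodeStorage,
    in entryStorage, nodeSize, entrySize, entries.Length).ThrowIfFailure();

foreach (MyEntry entry in entries)        // must be sorted by virtual offset
    builder.Add(in entry).ThrowIfFailure();

// The end offset must be greater than the last entry's start offset.
builder.Finalize(totalSize).ThrowIfFailure();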

View File

@@ -23,10 +23,10 @@ public class IndirectStorage : IStorage
public int StorageIndex;
public void SetVirtualOffset(long offset) => VirtualOffset = offset;
public long GetVirtualOffset() => VirtualOffset;
public readonly long GetVirtualOffset() => VirtualOffset;
public void SetPhysicalOffset(long offset) => PhysicalOffset = offset;
public long GetPhysicalOffset() => PhysicalOffset;
public readonly long GetPhysicalOffset() => PhysicalOffset;
}
public static long QueryHeaderStorageSize() => BucketTree.QueryHeaderStorageSize();
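The readonly modifiers added above matter because Entry is a mutable struct: a readonly member can be invoked on in parameters and ref readonly locals without the compiler making a defensive copy first. A standalone illustration with a hypothetical type, not from this diff:

struct Sample
{
    public long Value;
    public long Get() => Value;            // non-readonly: copies on 'in'
    public readonly long GetRo() => Value; // readonly: reads in place
}

static long Read(in Sample s)
{
    // Calling s.Get() would copy all of 's' first; s.GetRo() does not.
    return s.GetRo();
}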

View File

@@ -21,26 +21,26 @@ public struct PooledBuffer : IDisposable
private const int HeapAllocatableSizeMax = HeapBlockSize * (1 << HeapOrderMax);
private const int HeapAllocatableSizeMaxForLarge = HeapBlockSize * (1 << HeapOrderMaxForLarge);
private byte[] Array { get; set; }
private int Length { get; set; }
private byte[] _array;
private int _length;
public PooledBuffer(int idealSize, int requiredSize)
{
Array = null;
Length = default;
_array = null;
_length = default;
Allocate(idealSize, requiredSize);
}
public Span<byte> GetBuffer()
{
Assert.SdkRequiresNotNull(Array);
return Array.AsSpan(0, Length);
Assert.SdkRequiresNotNull(_array);
return _array.AsSpan(0, _length);
}
public int GetSize()
{
Assert.SdkRequiresNotNull(Array);
return Length;
Assert.SdkRequiresNotNull(_array);
return _length;
}
public static int GetAllocatableSizeMax() => GetAllocatableSizeMaxCore(false);
@@ -56,7 +56,7 @@ public struct PooledBuffer : IDisposable
private void AllocateCore(int idealSize, int requiredSize, bool enableLargeCapacity)
{
Assert.SdkRequiresNull(Array);
Assert.SdkRequiresNull(_array);
// Check that we can allocate this size.
Assert.SdkRequiresLessEqual(requiredSize, GetAllocatableSizeMaxCore(enableLargeCapacity));
@@ -66,21 +66,21 @@ public struct PooledBuffer : IDisposable
if (targetSize >= RentThresholdBytes)
{
Array = ArrayPool<byte>.Shared.Rent(targetSize);
_array = ArrayPool<byte>.Shared.Rent(targetSize);
}
else
{
Array = new byte[targetSize];
_array = new byte[targetSize];
}
Length = Array.Length;
_length = _array.Length;
}
public void Deallocate()
{
// Shrink the buffer to empty.
Shrink(0);
Assert.SdkNull(Array);
Assert.SdkNull(_array);
}
public void Shrink(int idealSize)
@@ -88,23 +88,23 @@ public struct PooledBuffer : IDisposable
Assert.SdkRequiresLessEqual(idealSize, GetAllocatableSizeMaxCore(true));
// Check if we actually need to shrink.
if (Length > idealSize)
if (_length > idealSize)
{
Assert.SdkRequiresNotNull(Array);
Assert.SdkRequiresNotNull(_array);
// Pretend we shrank the buffer.
Length = idealSize;
_length = idealSize;
// Shrinking to zero means that we have no buffer.
if (Length == 0)
if (_length == 0)
{
// Return the array if we rented it.
if (Array?.Length >= RentThresholdBytes)
if (_array?.Length >= RentThresholdBytes)
{
ArrayPool<byte>.Shared.Return(Array);
ArrayPool<byte>.Shared.Return(_array);
}
Array = null;
_array = null;
}
}
}
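For reference, a minimal usage sketch of PooledBuffer after the rename; the sizes are illustrative, and the Dispose behavior is assumed to route through Deallocate:

// Ask for ideally 128 KiB, requiring at least 16 KiB.
using (var buffer = new PooledBuffer(0x20000, 0x4000))
{
    Span<byte> span = buffer.GetBuffer();
    // ... use span.Length bytes ...
}   // Dispose shrinks to zero, returning rented arrays to the pool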

View File

@@ -0,0 +1,18 @@
using Xunit;
namespace LibHac.Tests.Common;
public static class TheoryDataCreator
{
public static TheoryData<int> CreateSequence(int start, int count)
{
var data = new TheoryData<int>();
for (int i = 0; i < count; i++)
{
data.Add(start + i);
}
return data;
}
}

View File

@@ -0,0 +1,164 @@
using System.Runtime.CompilerServices;
using LibHac.Fs;
using LibHac.FsSystem;
using LibHac.Tests.Common;
using LibHac.Util;
using Xunit;
namespace LibHac.Tests.FsSystem;
public class BucketTreeBuilderTests
{
public class BucketTreeBuilderTestConfig
{
public string ExpectedHeaderDigest { get; init; }
public string ExpectedNodeDigest { get; init; }
public string ExpectedEntryDigest { get; init; }
public ulong RngSeed { get; init; }
public SizeRange EntrySizeRange { get; init; }
public int NodeSize { get; init; }
public int EntryCount { get; init; }
}
// Bucket tree builder parameters and output digests that have been verified manually
private static readonly BucketTreeBuilderTestConfig[] BucketTreeBuilderTestData =
{
// Tiny tree
new()
{
ExpectedHeaderDigest = "34C3355A9C67F91A978FD8CD51A1FB69FB4A6575FA93EEA03FF94E3FDA4FF918",
ExpectedNodeDigest = "38B1BAA521BBD24204A2846A184C276DAA46065964910A5FC132BED73187B9F2",
ExpectedEntryDigest = "7D61723D0A332128713120961E607188F50A8870360328594F4A5CC1731B10EE",
RngSeed = 0,
EntrySizeRange = new SizeRange(0x1000, 1, 10),
NodeSize = 0x4000,
EntryCount = 5
},
// Slightly larger tree
new()
{
ExpectedHeaderDigest = "B297BF6EE037B9179CA78618D73B1F51F4C980DF18CA79D00BD99EA3CB801491",
ExpectedNodeDigest = "DEB446E4EF36937ED253D912D48BCB74C9745E55647E3B900B3730379285580F",
ExpectedEntryDigest = "ED8CBA7E42A03D9399562A577E5FE3203DCA6CDAEA44F9EB9D6EFEC174638AE1",
RngSeed = 0,
EntrySizeRange = new SizeRange(0x1000, 1, 10),
NodeSize = 0x4000,
EntryCount = 10000
},
// Very large tree that contains a L2 node
new()
{
ExpectedHeaderDigest = "D36E9BC6C618637F3C615A861826DEE9CA8E0AB37C51D7124D0112E2B2D666C2",
ExpectedNodeDigest = "FBB238FFAF8A7585A1413CA9BF12E0C70BCF2B12DA3399F1077C6E3D364886B9",
ExpectedEntryDigest = "F3A452EC58B7C937E6AACC31680CAFAEEA63B0BA4D26F7A2EAEAF2FF11ABCF26",
RngSeed = 0,
EntrySizeRange = new SizeRange(0x1000, 1, 10),
NodeSize = 0x4000,
EntryCount = 2_000_000
},
// Tree with node size of 0x400 containing multiple L2 nodes
new()
{
ExpectedHeaderDigest = "B0520728AAD615F48BD45EAD1D8BC953AE0B912C5DB9429DD8DF2BC7B656FBEC",
ExpectedNodeDigest = "F785D455960298F7EABAD6E1997CE1FD298BFD802788E84E35FBA4E65FCE90E9",
ExpectedEntryDigest = "B467120D77D2ECBD039D9E171F8D604D3F3ED7C60C3551878EF21ED52B02690C",
RngSeed = 0,
EntrySizeRange = new SizeRange(0x1000, 1, 10),
NodeSize = 0x400,
EntryCount = 50_000
},
// Tree with node size of 0x400 containing the maximum number of entries possible with that node size
new()
{
ExpectedHeaderDigest = "33C6DBFDC95C8F5DC75DFE1BD027E9943FAA1B90DEB33039827860BCEC31CAA2",
ExpectedNodeDigest = "A732F462E8D545C7409FFB5DE6BDB460A3D466BDBD730173A453FD81C82AA38C",
ExpectedEntryDigest = "9EE6FBA4E0D336A7082EF46EC64FD8CEC2BAA5C8CF760C357B9193FE37A04CE3",
RngSeed = 0,
EntrySizeRange = new SizeRange(0x1000, 1, 10),
NodeSize = 0x400,
EntryCount = 793_800
}
};
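The 793,800 figure in the last config is the capacity limit for this node size, which can be derived from the builder arithmetic (assuming the 20-byte IndirectStorage.Entry):

// entriesPerEntrySet = (0x400 - 16) / 20 = 50
// offsetsPerNode     = (0x400 - 16) / 8  = 126
// max entry sets     = 126 * 126         = 15_876
// max entries        = 15_876 * 50       = 793_800
// (which is why Initialize with 793_801 entries below must fail)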
public static TheoryData<int> BucketTreeBuilderTestTheoryData =
TheoryDataCreator.CreateSequence(0, BucketTreeBuilderTestData.Length);
[Theory, MemberData(nameof(BucketTreeBuilderTestTheoryData))]
public void BuildTree_TreeIsGeneratedCorrectly(int index)
{
BucketTreeBuilderTestConfig config = BucketTreeBuilderTestData[index];
BucketTreeTests.BucketTreeData data = BucketTreeCreator.Create(config.RngSeed, config.EntrySizeRange,
config.NodeSize, config.EntryCount);
byte[] headerDigest = new byte[0x20];
byte[] nodeDigest = new byte[0x20];
byte[] entryDigest = new byte[0x20];
Crypto.Sha256.GenerateSha256Hash(data.Header, headerDigest);
Crypto.Sha256.GenerateSha256Hash(data.Nodes, nodeDigest);
Crypto.Sha256.GenerateSha256Hash(data.Entries, entryDigest);
Assert.Equal(config.ExpectedHeaderDigest, headerDigest.ToHexString());
Assert.Equal(config.ExpectedNodeDigest, nodeDigest.ToHexString());
Assert.Equal(config.ExpectedEntryDigest, entryDigest.ToHexString());
}
[Fact]
public void Initialize_TooManyEntries_ReturnsException()
{
Assert.Throws<HorizonResultException>(() =>
BucketTreeCreator.Create(0, new SizeRange(0x1000, 1, 10), 0x400, 793_801));
}
[Fact]
public void Finalize_NotAllEntriesAdded_ReturnsOutOfRange()
{
const int nodeSize = 0x4000;
const int entryCount = 10;
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var builder = new BucketTree2.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));
var entry = new IndirectStorage.Entry();
Assert.Success(builder.Add(in entry));
Assert.Result(ResultFs.OutOfRange, builder.Finalize(0x1000));
}
[Fact]
public void Finalize_InvalidEndOffset_ReturnsInvalidOffset()
{
const int nodeSize = 0x4000;
const int entryCount = 2;
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var builder = new BucketTree2.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));
var entry = new IndirectStorage.Entry();
Assert.Success(builder.Add(in entry));
entry.SetVirtualOffset(0x10000);
Assert.Success(builder.Add(in entry));
Assert.Result(ResultFs.InvalidOffset, builder.Finalize(0x1000));
}
}

View File

@@ -0,0 +1,139 @@
using System.Runtime.CompilerServices;
using LibHac.Fs;
using LibHac.FsSystem;
using Xunit;
namespace LibHac.Tests.FsSystem;
public record struct SizeRange(int BlockSize, int MinBlockCount, int MaxBlockCount);
internal static class BucketTreeCreator
{
public class EntryGenerator
{
private Random _random;
private readonly SizeRange _originalEntrySizeRange;
private readonly SizeRange _patchEntrySizeRange;
private IndirectStorage.Entry _currentEntry;
private long _originalStorageOffset;
private long _patchStorageOffset;
private long _patchedStorageOffset;
private bool _isOriginal;
public IndirectStorage.Entry CurrentEntry => _currentEntry;
public int CurrentEntryIndex { get; private set; }
public int CurrentEntrySize { get; private set; }
public long OriginalStorageSize => _originalStorageOffset;
public long PatchStorageSize => _patchStorageOffset;
public long PatchedStorageSize => _patchedStorageOffset;
public EntryGenerator(ulong rngSeed, SizeRange entrySizes)
{
_random = new Random(rngSeed);
_originalEntrySizeRange = entrySizes;
_patchEntrySizeRange = entrySizes;
_isOriginal = false;
CurrentEntryIndex = -1;
}
public EntryGenerator(ulong rngSeed, SizeRange originalEntrySizes, SizeRange patchEntrySizes)
{
_random = new Random(rngSeed);
_originalEntrySizeRange = originalEntrySizes;
_patchEntrySizeRange = patchEntrySizes;
_isOriginal = false;
CurrentEntryIndex = -1;
}
public void MoveNext()
{
_isOriginal = !_isOriginal;
SizeRange range = _isOriginal ? _originalEntrySizeRange : _patchEntrySizeRange;
int blockCount = _random.Next(range.MinBlockCount, range.MaxBlockCount);
int entrySize = blockCount * range.BlockSize;
CurrentEntryIndex++;
CurrentEntrySize = entrySize;
_currentEntry.SetVirtualOffset(_patchedStorageOffset);
_patchedStorageOffset += entrySize;
if (_isOriginal)
{
_currentEntry.SetPhysicalOffset(_originalStorageOffset);
_currentEntry.StorageIndex = 0;
_originalStorageOffset += entrySize;
}
else
{
_currentEntry.SetPhysicalOffset(_patchStorageOffset);
_currentEntry.StorageIndex = 1;
// Advance the original offset too to account for the data that's being replaced in the original storage
_originalStorageOffset += entrySize;
_patchStorageOffset += entrySize;
}
}
}
public static BucketTreeTests.BucketTreeData Create(ulong rngSeed, SizeRange entrySizes, int nodeSize, int entryCount)
{
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var generator = new EntryGenerator(rngSeed, entrySizes);
var builder = new BucketTree2.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage,
in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));
for (int i = 0; i < entryCount; i++)
{
generator.MoveNext();
IndirectStorage.Entry entry = generator.CurrentEntry;
Assert.Success(builder.Add(in entry));
}
Assert.Success(builder.Finalize(generator.PatchedStorageSize));
return new BucketTreeTests.BucketTreeData
{
NodeSize = nodeSize,
EntryCount = entryCount,
Header = headerBuffer,
Nodes = nodeBuffer,
Entries = entryBuffer
};
}
public static IndirectStorage.Entry[] GenerateEntries(ulong rngSeed, SizeRange entrySizeRange, int entryCount)
{
var entries = new IndirectStorage.Entry[entryCount];
var generator = new EntryGenerator(rngSeed, entrySizeRange);
for (int i = 0; i < entryCount; i++)
{
generator.MoveNext();
entries[i] = generator.CurrentEntry;
}
return entries;
}
}

View File

@@ -0,0 +1,185 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using LibHac.Fs;
using LibHac.FsSystem;
using LibHac.Tests.Common;
using Xunit;
namespace LibHac.Tests.FsSystem;
public class BucketTreeBuffers
{
public IndirectStorage.Entry[] Entries { get; }
public BucketTreeTests.BucketTreeData[] TreeData { get; }
public BucketTreeBuffers()
{
(int nodeSize, int entryCount)[] treeConfig = BucketTreeTests.BucketTreeTestParams;
TreeData = new BucketTreeTests.BucketTreeData[treeConfig.Length];
Entries = BucketTreeCreator.GenerateEntries(0, new SizeRange(0x1000, 1, 10), 2_000_001);
for (int i = 0; i < treeConfig.Length; i++)
{
(int nodeSize, int entryCount) = treeConfig[i];
TreeData[i] = BucketTreeCreator.Create(0, new SizeRange(0x1000, 1, 10), nodeSize, entryCount);
}
}
}
public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
{
// Keep the generated data between tests so it only has to be generated once
private readonly IndirectStorage.Entry[] _entries;
private readonly BucketTreeData[] _treeData;
public BucketTreeTests(BucketTreeBuffers buffers)
{
_entries = buffers.Entries;
_treeData = buffers.TreeData;
}
public static readonly (int nodeSize, int entryCount)[] BucketTreeTestParams =
{
(0x4000, 5),
(0x4000, 10000),
(0x4000, 2_000_000),
(0x400, 50_000),
(0x400, 793_800)
};
public static TheoryData<int> BucketTreeTestTheoryData =
TheoryDataCreator.CreateSequence(0, BucketTreeTestParams.Length);
public class BucketTreeData
{
public int NodeSize;
public int EntryCount;
public byte[] Header;
public byte[] Nodes;
public byte[] Entries;
public BucketTree2 CreateBucketTree()
{
int entrySize = Unsafe.SizeOf<IndirectStorage.Entry>();
BucketTree.Header header = MemoryMarshal.Cast<byte, BucketTree.Header>(Header.AsSpan())[0];
using var nodeStorage = new ValueSubStorage(new MemoryStorage(Nodes), 0, Nodes.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(Entries), 0, Entries.Length);
var tree = new BucketTree2();
Assert.Success(tree.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, NodeSize, entrySize, header.EntryCount));
return tree;
}
}
[Theory, MemberData(nameof(BucketTreeTestTheoryData))]
private void MoveNext_IterateAllFromStart_ReturnsCorrectEntries(int treeIndex)
{
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
using var visitor = new BucketTree2.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, 0));
for (int i = 0; i < entries.Length; i++)
{
if (i != 0)
{
Result rc = visitor.MoveNext();
if (!rc.IsSuccess())
Assert.Success(rc);
}
// These tests run about 4x slower if we let Assert.Equal check the values every time
if (visitor.CanMovePrevious() != (i != 0))
Assert.Equal(i != 0, visitor.CanMovePrevious());
if (visitor.CanMoveNext() != (i != entries.Length - 1))
Assert.Equal(i != entries.Length - 1, visitor.CanMoveNext());
ref readonly IndirectStorage.Entry entry = ref visitor.Get<IndirectStorage.Entry>();
if (entries[i].GetVirtualOffset() != entry.GetVirtualOffset())
Assert.Equal(entries[i].GetVirtualOffset(), entry.GetVirtualOffset());
if (entries[i].GetPhysicalOffset() != entry.GetPhysicalOffset())
Assert.Equal(entries[i].GetPhysicalOffset(), entry.GetPhysicalOffset());
if (entries[i].StorageIndex != entry.StorageIndex)
Assert.Equal(entries[i].StorageIndex, entry.StorageIndex);
}
}
[Theory, MemberData(nameof(BucketTreeTestTheoryData))]
private void MovePrevious_IterateAllFromEnd_ReturnsCorrectEntries(int treeIndex)
{
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
using var visitor = new BucketTree2.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, entries[^1].GetVirtualOffset()));
for (int i = entries.Length - 1; i >= 0; i--)
{
if (i != entries.Length - 1)
{
Result rc = visitor.MovePrevious();
if (!rc.IsSuccess())
Assert.Success(rc);
}
if (visitor.CanMovePrevious() != (i != 0))
Assert.Equal(i != 0, visitor.CanMovePrevious());
if (visitor.CanMoveNext() != (i != entries.Length - 1))
Assert.Equal(i != entries.Length - 1, visitor.CanMoveNext());
ref readonly IndirectStorage.Entry entry = ref visitor.Get<IndirectStorage.Entry>();
if (entries[i].GetVirtualOffset() != entry.GetVirtualOffset())
Assert.Equal(entries[i].GetVirtualOffset(), entry.GetVirtualOffset());
if (entries[i].GetPhysicalOffset() != entry.GetPhysicalOffset())
Assert.Equal(entries[i].GetPhysicalOffset(), entry.GetPhysicalOffset());
if (entries[i].StorageIndex != entry.StorageIndex)
Assert.Equal(entries[i].StorageIndex, entry.StorageIndex);
}
}
[Theory, MemberData(nameof(BucketTreeTestTheoryData))]
private void Find_RandomAccess_ReturnsCorrectEntries(int treeIndex)
{
const int findCount = 10000;
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
var random = new Random(123456);
for (int i = 0; i < findCount; i++)
{
int entryIndex = random.Next(0, entries.Length);
ref readonly IndirectStorage.Entry expectedEntry = ref entries[entryIndex];
// Add a random shift amount to test finding offsets in the middle of an entry
int offsetShift = random.Next(0, 2) * 0x500;
using var visitor = new BucketTree2.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, expectedEntry.GetVirtualOffset() + offsetShift));
ref readonly IndirectStorage.Entry actualEntry = ref visitor.Get<IndirectStorage.Entry>();
Assert.Equal(entryIndex != 0, visitor.CanMovePrevious());
Assert.Equal(entryIndex != entries.Length - 1, visitor.CanMoveNext());
Assert.Equal(expectedEntry.GetVirtualOffset(), actualEntry.GetVirtualOffset());
Assert.Equal(expectedEntry.GetPhysicalOffset(), actualEntry.GetPhysicalOffset());
Assert.Equal(expectedEntry.StorageIndex, actualEntry.StorageIndex);
}
}
}

View File

@@ -51,13 +51,12 @@ public class BufferedStorageTests
public int BufferManagerCacheCount { get; set; }
}
public static AccessTestConfig[] AccessTestConfigs =
{
new()
{
SizeClassProbs = new[] { 50, 50, 5 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 50, 1 }, // Read, Write, Flush
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 35467,
@@ -73,7 +72,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 5 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 50, 1 }, // Read, Write, Flush
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 6548433,
@@ -89,7 +88,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 0 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 0, 0 },
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 756478,
@@ -105,7 +104,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 0 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 0, 0 },
AccessTypeProbs = new[] { 0, 0, 5 }, // Random, Sequential, Frequent block
RngSeed = 38197549,
@@ -121,7 +120,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 0 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 50, 1 }, // Read, Write, Flush
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 567365,
@@ -137,7 +136,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 0 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 50, 1 }, // Read, Write, Flush
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 949365,
@@ -153,7 +152,7 @@ public class BufferedStorageTests
new()
{
SizeClassProbs = new[] { 50, 50, 10 },
SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
TaskProbs = new[] { 50, 50, 1 }, // Read, Write, Flush
AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
RngSeed = 670670,