Update IndirectStorage and SparseStorage for 13.0

This commit is contained in:
Alex Barney 2021-12-02 16:15:44 -07:00
parent f180bfeef9
commit 40925034e1
13 changed files with 2205 additions and 2377 deletions

View File

@ -25,7 +25,14 @@ public class Aes128CtrExStorage : Aes128CtrStorage
int entryCount, byte[] key, byte[] counter, bool leaveOpen)
: base(baseStorage, key, counter, leaveOpen)
{
Result rc = Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
nodeStorage.GetSize(out long nodeStorageSize).ThrowIfFailure();
entryStorage.GetSize(out long entryStorageSize).ThrowIfFailure();
using var valueNodeStorage = new ValueSubStorage(nodeStorage, 0, nodeStorageSize);
using var valueEntryStorage = new ValueSubStorage(entryStorage, 0, entryStorageSize);
Result rc = Table.Initialize(new ArrayPoolMemoryResource(), in valueNodeStorage, in valueEntryStorage, NodeSize,
Unsafe.SizeOf<Entry>(), entryCount);
rc.ThrowIfFailure();
}
@ -34,53 +41,55 @@ public class Aes128CtrExStorage : Aes128CtrStorage
if (destination.Length == 0)
return Result.Success;
var visitor = new BucketTree.Visitor();
Result rc = Table.GetOffsets(out BucketTree.Offsets offsets);
if (rc.IsFailure()) return rc.Miss();
try
if (!offsets.IsInclude(offset, destination.Length))
return ResultFs.OutOfRange.Log();
using var visitor = new BucketTree.Visitor();
rc = Table.Find(ref visitor.Ref, offset);
if (rc.IsFailure()) return rc;
long inPos = offset;
int outPos = 0;
int remaining = destination.Length;
while (remaining > 0)
{
Result rc = Table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;
var currentEntry = visitor.Get<Entry>();
long inPos = offset;
int outPos = 0;
int remaining = destination.Length;
while (remaining > 0)
// Get and validate the next entry offset
long nextEntryOffset;
if (visitor.CanMoveNext())
{
var currentEntry = visitor.Get<Entry>();
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
// Get and validate the next entry offset
long nextEntryOffset;
if (visitor.CanMoveNext())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
nextEntryOffset = visitor.Get<Entry>().Offset;
if (!Table.Includes(nextEntryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
}
else
{
nextEntryOffset = Table.GetEnd();
}
int bytesToRead = (int)Math.Min(nextEntryOffset - inPos, remaining);
lock (_locker)
{
UpdateCounterSubsection((uint)currentEntry.Generation);
rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead));
if (rc.IsFailure()) return rc;
}
outPos += bytesToRead;
inPos += bytesToRead;
remaining -= bytesToRead;
nextEntryOffset = visitor.Get<Entry>().Offset;
if (!offsets.IsInclude(nextEntryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
}
else
{
nextEntryOffset = offsets.EndOffset;
}
int bytesToRead = (int)Math.Min(nextEntryOffset - inPos, remaining);
lock (_locker)
{
UpdateCounterSubsection((uint)currentEntry.Generation);
rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead));
if (rc.IsFailure()) return rc;
}
outPos += bytesToRead;
inPos += bytesToRead;
remaining -= bytesToRead;
}
finally { visitor.Dispose(); }
return Result.Success;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -12,52 +12,57 @@ public partial class BucketTree
{
public class Builder
{
private SubStorage NodeStorage { get; set; }
private SubStorage EntryStorage { get; set; }
private NodeBuffer _l1Node;
private NodeBuffer _l2Node;
private NodeBuffer _entrySet;
private int NodeSize { get; set; }
private int EntrySize { get; set; }
private int EntryCount { get; set; }
private int EntriesPerEntrySet { get; set; }
private int OffsetsPerNode { get; set; }
private ValueSubStorage _nodeStorage;
private ValueSubStorage _entryStorage;
private int CurrentL2OffsetIndex { get; set; }
private int CurrentEntryIndex { get; set; }
private long CurrentOffset { get; set; } = -1;
private int _nodeSize;
private int _entrySize;
private int _entryCount;
private int _entriesPerEntrySet;
private int _offsetsPerNode;
private int _currentL2OffsetIndex;
private int _currentEntryIndex;
private long _currentOffset;
public Builder()
{
_currentOffset = -1;
}
/// <summary>
/// Initializes the bucket tree builder.
/// </summary>
/// <param name="headerStorage">The <see cref="SubStorage"/> the tree's header will be written to. Must be at least the size in bytes returned by <see cref="QueryHeaderStorageSize"/>.</param>
/// <param name="nodeStorage">The <see cref="SubStorage"/> the tree's nodes will be written to. Must be at least the size in bytes returned by <see cref="QueryNodeStorageSize"/>.</param>
/// <param name="entryStorage">The <see cref="SubStorage"/> the tree's entries will be written to. Must be at least the size in bytes returned by <see cref="QueryEntryStorageSize"/>.</param>
/// <param name="allocator">The <see cref="MemoryResource"/> to use for buffer allocation.</param>
/// <param name="headerStorage">The <see cref="ValueSubStorage"/> the tree's header will be written to. Must be at least the size in bytes returned by <see cref="QueryHeaderStorageSize"/>.</param>
/// <param name="nodeStorage">The <see cref="ValueSubStorage"/> the tree's nodes will be written to. Must be at least the size in bytes returned by <see cref="QueryNodeStorageSize"/>.</param>
/// <param name="entryStorage">The <see cref="ValueSubStorage"/> the tree's entries will be written to. Must be at least the size in bytes returned by <see cref="QueryEntryStorageSize"/>.</param>
/// <param name="nodeSize">The size of each node in the bucket tree. Must be a power of 2.</param>
/// <param name="entrySize">The size of each entry that will be stored in the bucket tree.</param>
/// <param name="entryCount">The exact number of entries that will be added to the bucket tree.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Initialize(SubStorage headerStorage, SubStorage nodeStorage, SubStorage entryStorage,
int nodeSize, int entrySize, int entryCount)
public Result Initialize(MemoryResource allocator, in ValueSubStorage headerStorage,
in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int nodeSize, int entrySize,
int entryCount)
{
Assert.NotNull(allocator);
Assert.SdkRequiresLessEqual(sizeof(long), entrySize);
Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf<NodeHeader>(), nodeSize);
Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax);
Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize));
if (headerStorage is null || nodeStorage is null || entryStorage is null)
return ResultFs.NullptrArgument.Log();
// Set the builder parameters
NodeSize = nodeSize;
EntrySize = entrySize;
EntryCount = entryCount;
_nodeSize = nodeSize;
_entrySize = entrySize;
_entryCount = entryCount;
EntriesPerEntrySet = GetEntryCount(nodeSize, entrySize);
OffsetsPerNode = GetOffsetCount(nodeSize);
CurrentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount);
_entriesPerEntrySet = GetEntryCount(nodeSize, entrySize);
_offsetsPerNode = GetOffsetCount(nodeSize);
_currentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount);
// Create and write the header
var header = new Header();
@ -66,27 +71,27 @@ public partial class BucketTree
if (rc.IsFailure()) return rc;
// Allocate buffers for the L1 node and entry sets
_l1Node.Allocate(nodeSize);
_entrySet.Allocate(nodeSize);
_l1Node.Allocate(allocator, nodeSize);
_entrySet.Allocate(allocator, nodeSize);
int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
// Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node
if (OffsetsPerNode < entrySetCount)
if (_offsetsPerNode < entrySetCount)
{
_l2Node.Allocate(nodeSize);
_l2Node.Allocate(allocator, nodeSize);
}
_l1Node.FillZero();
_l2Node.FillZero();
_entrySet.FillZero();
NodeStorage = nodeStorage;
EntryStorage = entryStorage;
_nodeStorage.Set(in nodeStorage);
_entryStorage.Set(in entryStorage);
// Set the initial position
CurrentEntryIndex = 0;
CurrentOffset = -1;
_currentEntryIndex = 0;
_currentOffset = -1;
return Result.Success;
}
@ -97,17 +102,17 @@ public partial class BucketTree
/// <typeparam name="T">The type of the entry to add. Added entries should all be the same type.</typeparam>
/// <param name="entry">The entry to add.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Add<T>(ref T entry) where T : unmanaged
public Result Add<T>(in T entry) where T : unmanaged
{
Assert.SdkRequiresEqual(Unsafe.SizeOf<T>(), EntrySize);
Assert.SdkRequiresEqual(Unsafe.SizeOf<T>(), _entrySize);
if (CurrentEntryIndex >= EntryCount)
if (_currentEntryIndex >= _entryCount)
return ResultFs.OutOfRange.Log();
// The entry offset must always be the first 8 bytes of the struct
long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsByteSpan(ref entry));
long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsReadOnlyByteSpan(in entry));
if (entryOffset <= CurrentOffset)
if (entryOffset <= _currentOffset)
return ResultFs.InvalidOffset.Log();
Result rc = FinalizePreviousEntrySet(entryOffset);
@ -116,11 +121,11 @@ public partial class BucketTree
AddEntryOffset(entryOffset);
// Write the new entry
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
_entrySet.GetNode<T>().GetWritableArray()[indexInEntrySet] = entry;
CurrentOffset = entryOffset;
CurrentEntryIndex++;
_currentOffset = entryOffset;
_currentEntryIndex++;
return Result.Success;
}
@ -133,32 +138,32 @@ public partial class BucketTree
/// <returns>The <see cref="Result"/> of the operation.</returns>
private Result FinalizePreviousEntrySet(long endOffset)
{
int prevEntrySetIndex = CurrentEntryIndex / EntriesPerEntrySet - 1;
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
int prevEntrySetIndex = _currentEntryIndex / _entriesPerEntrySet - 1;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// If the previous Add finished an entry set
if (CurrentEntryIndex > 0 && indexInEntrySet == 0)
if (_currentEntryIndex > 0 && indexInEntrySet == 0)
{
// Set the end offset of that entry set
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
entrySetHeader.Index = prevEntrySetIndex;
entrySetHeader.Count = EntriesPerEntrySet;
entrySetHeader.Offset = endOffset;
entrySetHeader.EntryCount = _entriesPerEntrySet;
entrySetHeader.OffsetEnd = endOffset;
// Write the entry set to the entry storage
long storageOffset = (long)NodeSize * prevEntrySetIndex;
Result rc = EntryStorage.Write(storageOffset, _entrySet.GetBuffer());
long storageOffset = (long)_nodeSize * prevEntrySetIndex;
Result rc = _entryStorage.Write(storageOffset, _entrySet.GetBuffer());
if (rc.IsFailure()) return rc;
// Clear the entry set buffer to begin the new entry set
_entrySet.FillZero();
// Check if we're writing in L2 nodes
if (CurrentL2OffsetIndex > OffsetsPerNode)
if (_currentL2OffsetIndex > _offsetsPerNode)
{
int prevL2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode - 2;
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
int prevL2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode - 2;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
// If the previous Add finished an L2 node
if (indexInL2Node == 0)
@ -167,12 +172,12 @@ public partial class BucketTree
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
l2NodeHeader.Index = prevL2NodeIndex;
l2NodeHeader.Count = OffsetsPerNode;
l2NodeHeader.Offset = endOffset;
l2NodeHeader.EntryCount = _offsetsPerNode;
l2NodeHeader.OffsetEnd = endOffset;
// Write the L2 node to the node storage
long nodeOffset = (long)NodeSize * (prevL2NodeIndex + 1);
rc = NodeStorage.Write(nodeOffset, _l2Node.GetBuffer());
long nodeOffset = (long)_nodeSize * (prevL2NodeIndex + 1);
rc = _nodeStorage.Write(nodeOffset, _l2Node.GetBuffer());
if (rc.IsFailure()) return rc;
// Clear the L2 node buffer to begin the new node
@ -190,31 +195,31 @@ public partial class BucketTree
/// <param name="entryOffset">The start offset of the entry being added.</param>
private void AddEntryOffset(long entryOffset)
{
int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet;
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// If we're starting a new entry set we need to add its start offset to the L1/L2 nodes
if (indexInEntrySet == 0)
{
Span<long> l1Data = _l1Node.GetNode<long>().GetWritableArray();
if (CurrentL2OffsetIndex == 0)
if (_currentL2OffsetIndex == 0)
{
// There are no L2 nodes. Write the entry set end offset directly to L1
l1Data[entrySetIndex] = entryOffset;
}
else
{
if (CurrentL2OffsetIndex < OffsetsPerNode)
if (_currentL2OffsetIndex < _offsetsPerNode)
{
// The current L2 offset is stored in the L1 node
l1Data[CurrentL2OffsetIndex] = entryOffset;
l1Data[_currentL2OffsetIndex] = entryOffset;
}
else
{
// Write the entry set offset to the current L2 node
int l2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode;
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
int l2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
Span<long> l2Data = _l2Node.GetNode<long>().GetWritableArray();
l2Data[indexInL2Node] = entryOffset;
@ -226,7 +231,7 @@ public partial class BucketTree
}
}
CurrentL2OffsetIndex++;
_currentL2OffsetIndex++;
}
}
}
@ -239,20 +244,20 @@ public partial class BucketTree
public Result Finalize(long endOffset)
{
// Finalize must only be called after all entries are added
if (EntryCount != CurrentEntryIndex)
if (_entryCount != _currentEntryIndex)
return ResultFs.OutOfRange.Log();
if (endOffset <= CurrentOffset)
if (endOffset <= _currentOffset)
return ResultFs.InvalidOffset.Log();
if (CurrentOffset == -1)
if (_currentOffset == -1)
return Result.Success;
Result rc = FinalizePreviousEntrySet(endOffset);
if (rc.IsFailure()) return rc;
int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet;
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
// Finalize the current entry set if needed
if (indexInEntrySet != 0)
@ -260,49 +265,49 @@ public partial class BucketTree
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
entrySetHeader.Index = entrySetIndex;
entrySetHeader.Count = indexInEntrySet;
entrySetHeader.Offset = endOffset;
entrySetHeader.EntryCount = indexInEntrySet;
entrySetHeader.OffsetEnd = endOffset;
long entryStorageOffset = (long)NodeSize * entrySetIndex;
rc = EntryStorage.Write(entryStorageOffset, _entrySet.GetBuffer());
long entryStorageOffset = (long)_nodeSize * entrySetIndex;
rc = _entryStorage.Write(entryStorageOffset, _entrySet.GetBuffer());
if (rc.IsFailure()) return rc;
}
int l2NodeIndex = BitUtil.DivideUp(CurrentL2OffsetIndex, OffsetsPerNode) - 2;
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
int l2NodeIndex = BitUtil.DivideUp(_currentL2OffsetIndex, _offsetsPerNode) - 2;
int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;
// Finalize the current L2 node if needed
if (CurrentL2OffsetIndex > OffsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0))
if (_currentL2OffsetIndex > _offsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0))
{
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
l2NodeHeader.Index = l2NodeIndex;
l2NodeHeader.Count = indexInL2Node != 0 ? indexInL2Node : OffsetsPerNode;
l2NodeHeader.Offset = endOffset;
l2NodeHeader.EntryCount = indexInL2Node != 0 ? indexInL2Node : _offsetsPerNode;
l2NodeHeader.OffsetEnd = endOffset;
long l2NodeStorageOffset = NodeSize * (l2NodeIndex + 1);
rc = NodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer());
long l2NodeStorageOffset = _nodeSize * (l2NodeIndex + 1);
rc = _nodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer());
if (rc.IsFailure()) return rc;
}
// Finalize the L1 node
ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader();
l1NodeHeader.Index = 0;
l1NodeHeader.Offset = endOffset;
l1NodeHeader.OffsetEnd = endOffset;
// L1 count depends on the existence or absence of L2 nodes
if (CurrentL2OffsetIndex == 0)
if (_currentL2OffsetIndex == 0)
{
l1NodeHeader.Count = BitUtil.DivideUp(CurrentEntryIndex, EntriesPerEntrySet);
l1NodeHeader.EntryCount = BitUtil.DivideUp(_currentEntryIndex, _entriesPerEntrySet);
}
else
{
l1NodeHeader.Count = l2NodeIndex + 1;
l1NodeHeader.EntryCount = l2NodeIndex + 1;
}
rc = NodeStorage.Write(0, _l1Node.GetBuffer());
rc = _nodeStorage.Write(0, _l1Node.GetBuffer());
if (rc.IsFailure()) return rc;
CurrentOffset = long.MaxValue;
_currentOffset = long.MaxValue;
return Result.Success;
}
}

View File

@ -1,314 +0,0 @@
using System;
using System.Buffers.Binary;
using System.Runtime.CompilerServices;
using LibHac.Common;
using LibHac.Diag;
using LibHac.Fs;
using LibHac.Util;
namespace LibHac.FsSystem;
public partial class BucketTree2
{
    /// <summary>
    /// Builds a <see cref="BucketTree2"/> by writing its header, L1/L2 offset nodes, and entry
    /// sets to caller-provided storages. Entries must be added in strictly ascending offset order
    /// via <see cref="Builder.Add{T}"/>, then the tree completed with <see cref="Builder.Finalize"/>.
    /// </summary>
    public class Builder
    {
        private NodeBuffer _l1Node;
        private NodeBuffer _l2Node;
        private NodeBuffer _entrySet;

        private ValueSubStorage _nodeStorage;
        private ValueSubStorage _entryStorage;

        private int _nodeSize;
        private int _entrySize;
        private int _entryCount;
        private int _entriesPerEntrySet;
        private int _offsetsPerNode;

        private int _currentL2OffsetIndex;
        private int _currentEntryIndex;
        private long _currentOffset;

        public Builder()
        {
            // -1 marks "no entries added yet"; entry offsets must be strictly greater than this.
            _currentOffset = -1;
        }

        /// <summary>
        /// Initializes the bucket tree builder.
        /// </summary>
        /// <param name="allocator">The <see cref="MemoryResource"/> to use for buffer allocation.</param>
        /// <param name="headerStorage">The <see cref="ValueSubStorage"/> the tree's header will be written to. Must be at least the size in bytes returned by <see cref="QueryHeaderStorageSize"/>.</param>
        /// <param name="nodeStorage">The <see cref="ValueSubStorage"/> the tree's nodes will be written to. Must be at least the size in bytes returned by <see cref="QueryNodeStorageSize"/>.</param>
        /// <param name="entryStorage">The <see cref="ValueSubStorage"/> the tree's entries will be written to. Must be at least the size in bytes returned by <see cref="QueryEntryStorageSize"/>.</param>
        /// <param name="nodeSize">The size of each node in the bucket tree. Must be a power of 2.</param>
        /// <param name="entrySize">The size of each entry that will be stored in the bucket tree.</param>
        /// <param name="entryCount">The exact number of entries that will be added to the bucket tree.</param>
        /// <returns>The <see cref="Result"/> of the operation.</returns>
        public Result Initialize(MemoryResource allocator, in ValueSubStorage headerStorage,
            in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int nodeSize, int entrySize,
            int entryCount)
        {
            Assert.NotNull(allocator);
            Assert.SdkRequiresLessEqual(sizeof(long), entrySize);
            Assert.SdkRequiresLessEqual(entrySize + Unsafe.SizeOf<NodeHeader>(), nodeSize);
            Assert.SdkRequiresWithinMinMax(nodeSize, NodeSizeMin, NodeSizeMax);
            Assert.SdkRequires(BitUtil.IsPowerOfTwo(nodeSize));

            // Set the builder parameters
            _nodeSize = nodeSize;
            _entrySize = entrySize;
            _entryCount = entryCount;

            _entriesPerEntrySet = GetEntryCount(nodeSize, entrySize);
            _offsetsPerNode = GetOffsetCount(nodeSize);
            _currentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount);

            // Create and write the header
            var header = new Header();
            header.Format(entryCount);
            Result rc = headerStorage.Write(0, SpanHelpers.AsByteSpan(ref header));
            if (rc.IsFailure()) return rc;

            // Allocate buffers for the L1 node and entry sets
            _l1Node.Allocate(allocator, nodeSize);
            _entrySet.Allocate(allocator, nodeSize);

            int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);

            // Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node
            if (_offsetsPerNode < entrySetCount)
            {
                _l2Node.Allocate(allocator, nodeSize);
            }

            _l1Node.FillZero();
            _l2Node.FillZero();
            _entrySet.FillZero();

            _nodeStorage.Set(in nodeStorage);
            _entryStorage.Set(in entryStorage);

            // Set the initial position
            _currentEntryIndex = 0;
            _currentOffset = -1;

            return Result.Success;
        }

        /// <summary>
        /// Adds a new entry to the bucket tree.
        /// </summary>
        /// <typeparam name="T">The type of the entry to add. Added entries should all be the same type.</typeparam>
        /// <param name="entry">The entry to add.</param>
        /// <returns>The <see cref="Result"/> of the operation.</returns>
        public Result Add<T>(in T entry) where T : unmanaged
        {
            Assert.SdkRequiresEqual(Unsafe.SizeOf<T>(), _entrySize);

            if (_currentEntryIndex >= _entryCount)
                return ResultFs.OutOfRange.Log();

            // The entry offset must always be the first 8 bytes of the struct
            long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsReadOnlyByteSpan(in entry));

            // Offsets must be strictly increasing across Add calls.
            if (entryOffset <= _currentOffset)
                return ResultFs.InvalidOffset.Log();

            Result rc = FinalizePreviousEntrySet(entryOffset);
            if (rc.IsFailure()) return rc;

            AddEntryOffset(entryOffset);

            // Write the new entry
            int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;
            _entrySet.GetNode<T>().GetWritableArray()[indexInEntrySet] = entry;

            _currentOffset = entryOffset;
            _currentEntryIndex++;

            return Result.Success;
        }

        /// <summary>
        /// Checks if a new entry set is being started. If so, sets the end offset of the previous
        /// entry set and writes it to the output storage.
        /// </summary>
        /// <param name="endOffset">The end offset of the previous entry.</param>
        /// <returns>The <see cref="Result"/> of the operation.</returns>
        private Result FinalizePreviousEntrySet(long endOffset)
        {
            int prevEntrySetIndex = _currentEntryIndex / _entriesPerEntrySet - 1;
            int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;

            // If the previous Add finished an entry set
            if (_currentEntryIndex > 0 && indexInEntrySet == 0)
            {
                // Set the end offset of that entry set
                ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();

                entrySetHeader.Index = prevEntrySetIndex;
                entrySetHeader.EntryCount = _entriesPerEntrySet;
                entrySetHeader.OffsetEnd = endOffset;

                // Write the entry set to the entry storage
                long storageOffset = (long)_nodeSize * prevEntrySetIndex;
                Result rc = _entryStorage.Write(storageOffset, _entrySet.GetBuffer());
                if (rc.IsFailure()) return rc;

                // Clear the entry set buffer to begin the new entry set
                _entrySet.FillZero();

                // Check if we're writing in L2 nodes
                if (_currentL2OffsetIndex > _offsetsPerNode)
                {
                    int prevL2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode - 2;
                    int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;

                    // If the previous Add finished an L2 node
                    if (indexInL2Node == 0)
                    {
                        // Set the end offset of that node
                        ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();

                        l2NodeHeader.Index = prevL2NodeIndex;
                        l2NodeHeader.EntryCount = _offsetsPerNode;
                        l2NodeHeader.OffsetEnd = endOffset;

                        // Write the L2 node to the node storage. Node 0 is the L1 node, so L2
                        // node i lives at storage index i + 1.
                        long nodeOffset = (long)_nodeSize * (prevL2NodeIndex + 1);
                        rc = _nodeStorage.Write(nodeOffset, _l2Node.GetBuffer());
                        if (rc.IsFailure()) return rc;

                        // Clear the L2 node buffer to begin the new node
                        _l2Node.FillZero();
                    }
                }
            }

            return Result.Success;
        }

        /// <summary>
        /// If needed, adds a new entry set's start offset to the L1 or L2 nodes.
        /// </summary>
        /// <param name="entryOffset">The start offset of the entry being added.</param>
        private void AddEntryOffset(long entryOffset)
        {
            int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
            int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;

            // If we're starting a new entry set we need to add its start offset to the L1/L2 nodes
            if (indexInEntrySet == 0)
            {
                Span<long> l1Data = _l1Node.GetNode<long>().GetWritableArray();

                if (_currentL2OffsetIndex == 0)
                {
                    // There are no L2 nodes. Write the entry set end offset directly to L1
                    l1Data[entrySetIndex] = entryOffset;
                }
                else
                {
                    if (_currentL2OffsetIndex < _offsetsPerNode)
                    {
                        // The current L2 offset is stored in the L1 node
                        l1Data[_currentL2OffsetIndex] = entryOffset;
                    }
                    else
                    {
                        // Write the entry set offset to the current L2 node
                        int l2NodeIndex = _currentL2OffsetIndex / _offsetsPerNode;
                        int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;

                        Span<long> l2Data = _l2Node.GetNode<long>().GetWritableArray();
                        l2Data[indexInL2Node] = entryOffset;

                        // If we're starting a new L2 node we need to add its start offset to the L1 node
                        if (indexInL2Node == 0)
                        {
                            l1Data[l2NodeIndex - 1] = entryOffset;
                        }
                    }

                    _currentL2OffsetIndex++;
                }
            }
        }

        /// <summary>
        /// Finalizes the bucket tree. Must be called after all entries are added.
        /// </summary>
        /// <param name="endOffset">The end offset of the bucket tree.</param>
        /// <returns>The <see cref="Result"/> of the operation.</returns>
        public Result Finalize(long endOffset)
        {
            // Finalize must only be called after all entries are added
            if (_entryCount != _currentEntryIndex)
                return ResultFs.OutOfRange.Log();

            if (endOffset <= _currentOffset)
                return ResultFs.InvalidOffset.Log();

            if (_currentOffset == -1)
                return Result.Success;

            Result rc = FinalizePreviousEntrySet(endOffset);
            if (rc.IsFailure()) return rc;

            int entrySetIndex = _currentEntryIndex / _entriesPerEntrySet;
            int indexInEntrySet = _currentEntryIndex % _entriesPerEntrySet;

            // Finalize the current entry set if needed
            if (indexInEntrySet != 0)
            {
                ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();

                entrySetHeader.Index = entrySetIndex;
                entrySetHeader.EntryCount = indexInEntrySet;
                entrySetHeader.OffsetEnd = endOffset;

                long entryStorageOffset = (long)_nodeSize * entrySetIndex;
                rc = _entryStorage.Write(entryStorageOffset, _entrySet.GetBuffer());
                if (rc.IsFailure()) return rc;
            }

            int l2NodeIndex = BitUtil.DivideUp(_currentL2OffsetIndex, _offsetsPerNode) - 2;
            int indexInL2Node = _currentL2OffsetIndex % _offsetsPerNode;

            // Finalize the current L2 node if needed
            if (_currentL2OffsetIndex > _offsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0))
            {
                ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
                l2NodeHeader.Index = l2NodeIndex;
                l2NodeHeader.EntryCount = indexInL2Node != 0 ? indexInL2Node : _offsetsPerNode;
                l2NodeHeader.OffsetEnd = endOffset;

                // Cast to long before multiplying so the offset computation can't overflow Int32,
                // matching every other storage-offset calculation in this class.
                long l2NodeStorageOffset = (long)_nodeSize * (l2NodeIndex + 1);
                rc = _nodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer());
                if (rc.IsFailure()) return rc;
            }

            // Finalize the L1 node
            ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader();
            l1NodeHeader.Index = 0;
            l1NodeHeader.OffsetEnd = endOffset;

            // L1 count depends on the existence or absence of L2 nodes
            if (_currentL2OffsetIndex == 0)
            {
                l1NodeHeader.EntryCount = BitUtil.DivideUp(_currentEntryIndex, _entriesPerEntrySet);
            }
            else
            {
                l1NodeHeader.EntryCount = l2NodeIndex + 1;
            }

            rc = _nodeStorage.Write(0, _l1Node.GetBuffer());
            if (rc.IsFailure()) return rc;

            // A finalized builder can accept no further entries.
            _currentOffset = long.MaxValue;
            return Result.Success;
        }
    }
}

View File

@ -2,20 +2,27 @@
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using LibHac.Common;
using LibHac.Common.FixedArrays;
using LibHac.Diag;
using LibHac.Fs;
namespace LibHac.FsSystem;
/// <summary>
/// Combines multiple <see cref="IStorage"/>s into a single <see cref="IStorage"/>.
/// </summary>
/// <remarks><para>The <see cref="IndirectStorage"/>'s <see cref="BucketTree"/> contains <see cref="Entry"/>
/// values that describe how the created storage is to be built from the base storages.</para>
/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
public class IndirectStorage : IStorage
{
public static readonly int StorageCount = 2;
public static readonly int NodeSize = 1024 * 16;
private BucketTree Table { get; } = new BucketTree();
private SubStorage[] DataStorage { get; } = new SubStorage[StorageCount];
private BucketTree _table;
private Array2<ValueSubStorage> _dataStorage;
[StructLayout(LayoutKind.Sequential, Size = 0x14, Pack = 4)]
[StructLayout(LayoutKind.Sequential, Pack = 4)]
public struct Entry
{
private long VirtualOffset;
@ -29,6 +36,52 @@ public class IndirectStorage : IStorage
public readonly long GetPhysicalOffset() => PhysicalOffset;
}
public struct EntryData
{
public long VirtualOffset;
public long PhysicalOffset;
public int StorageIndex;
public void Set(in Entry entry)
{
VirtualOffset = entry.GetVirtualOffset();
PhysicalOffset = entry.GetPhysicalOffset();
StorageIndex = entry.StorageIndex;
}
}
private struct ContinuousReadingEntry : BucketTree.IContinuousReadingEntry
{
public int FragmentSizeMax => 1024 * 4;
#pragma warning disable CS0649
// This field will be read in by BucketTree.Visitor.ScanContinuousReading
private Entry _entry;
#pragma warning restore CS0649
public readonly long GetVirtualOffset() => _entry.GetVirtualOffset();
public readonly long GetPhysicalOffset() => _entry.GetPhysicalOffset();
public readonly bool IsFragment() => _entry.StorageIndex != 0;
}
public IndirectStorage()
{
_table = new BucketTree();
}
public override void Dispose()
{
FinalizeObject();
Span<ValueSubStorage> items = _dataStorage.Items;
for (int i = 0; i < items.Length; i++)
items[i].Dispose();
_table.Dispose();
base.Dispose();
}
public static long QueryHeaderStorageSize() => BucketTree.QueryHeaderStorageSize();
public static long QueryNodeStorageSize(int entryCount) =>
@ -37,120 +90,86 @@ public class IndirectStorage : IStorage
public static long QueryEntryStorageSize(int entryCount) =>
BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
public bool IsInitialized() => Table.IsInitialized();
public Result Initialize(SubStorage tableStorage)
{
// Read and verify the bucket tree header.
// note: skip init
var header = new BucketTree.Header();
Result rc = tableStorage.Read(0, SpanHelpers.AsByteSpan(ref header));
if (rc.IsFailure()) return rc;
rc = header.Verify();
if (rc.IsFailure()) return rc;
// Determine extents.
long nodeStorageSize = QueryNodeStorageSize(header.EntryCount);
long entryStorageSize = QueryEntryStorageSize(header.EntryCount);
long nodeStorageOffset = QueryHeaderStorageSize();
long entryStorageOffset = nodeStorageOffset + nodeStorageSize;
// Initialize.
var nodeStorage = new SubStorage(tableStorage, nodeStorageOffset, nodeStorageSize);
var entryStorage = new SubStorage(tableStorage, entryStorageOffset, entryStorageSize);
return Initialize(nodeStorage, entryStorage, header.EntryCount);
}
public Result Initialize(SubStorage nodeStorage, SubStorage entryStorage, int entryCount)
{
return Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
}
public void SetStorage(int index, SubStorage storage)
public void SetStorage(int index, in ValueSubStorage storage)
{
Assert.SdkRequiresInRange(index, 0, StorageCount);
DataStorage[index] = storage;
_dataStorage[index].Set(in storage);
}
public void SetStorage(int index, IStorage storage, long offset, long size)
{
Assert.SdkRequiresInRange(index, 0, StorageCount);
DataStorage[index] = new SubStorage(storage, offset, size);
using var subStorage = new ValueSubStorage(storage, offset, size);
_dataStorage[index].Set(in subStorage);
}
public Result GetEntryList(Span<Entry> entryBuffer, out int outputEntryCount, long offset, long size)
protected ref ValueSubStorage GetDataStorage(int index)
{
// Validate pre-conditions
Assert.SdkRequiresLessEqual(0, offset);
Assert.SdkRequiresLessEqual(0, size);
Assert.SdkRequires(IsInitialized());
// Clear the out count
outputEntryCount = 0;
// Succeed if there's no range
if (size == 0)
return Result.Success;
// Check that our range is valid
if (!Table.Includes(offset, size))
return ResultFs.OutOfRange.Log();
// Find the offset in our tree
var visitor = new BucketTree.Visitor();
try
{
Result rc = Table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;
long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset > 0 || !Table.Includes(entryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
// Prepare to loop over entries
long endOffset = offset + size;
int count = 0;
ref Entry currentEntry = ref visitor.Get<Entry>();
while (currentEntry.GetVirtualOffset() < endOffset)
{
// Try to write the entry to the out list
if (entryBuffer.Length != 0)
{
if (count >= entryBuffer.Length)
break;
entryBuffer[count] = currentEntry;
}
count++;
// Advance
if (visitor.CanMoveNext())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
currentEntry = ref visitor.Get<Entry>();
}
else
{
break;
}
}
// Write the entry count
outputEntryCount = count;
return Result.Success;
}
finally { visitor.Dispose(); }
Assert.SdkRequiresInRange(index, 0, StorageCount);
return ref _dataStorage[index];
}
protected override unsafe Result DoRead(long offset, Span<byte> destination)
/// <summary>Gets the <see cref="BucketTree"/> holding this storage's relocation entries.</summary>
protected BucketTree GetEntryTable() => _table;
/// <summary>Checks whether this storage's entry table has been initialized.</summary>
public bool IsInitialized() => _table.IsInitialized();
/// <summary>
/// Initializes the storage's entry table from a single storage that contains the bucket tree
/// header followed by the node buffer and then the entry buffer.
/// </summary>
/// <param name="allocator">The <see cref="MemoryResource"/> used for the entry table's buffers.</param>
/// <param name="tableStorage">A storage containing the table's header, nodes and entries, in that order.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Initialize(MemoryResource allocator, in ValueSubStorage tableStorage)
{
    // Read and verify the bucket tree header at the start of the table storage.
    Unsafe.SkipInit(out BucketTree.Header header);
    Result rc = tableStorage.Read(0, SpanHelpers.AsByteSpan(ref header));
    if (rc.IsFailure()) return rc.Miss();
    rc = header.Verify();
    if (rc.IsFailure()) return rc.Miss();
    // Compute where the node and entry sections are located within the table storage.
    long nodeStorageSize = QueryNodeStorageSize(header.EntryCount);
    long entryStorageSize = QueryEntryStorageSize(header.EntryCount);
    long nodeStorageOffset = QueryHeaderStorageSize();
    long entryStorageOffset = nodeStorageSize + nodeStorageOffset;
    // Ensure the provided storage is large enough to hold both sections.
    rc = tableStorage.GetSize(out long storageSize);
    if (rc.IsFailure()) return rc.Miss();
    if (storageSize < entryStorageOffset + entryStorageSize)
        return ResultFs.InvalidIndirectStorageBucketTreeSize.Log();
    // Initialize using sub-ranges of the table storage for the nodes and entries.
    using var nodeStorage = new ValueSubStorage(tableStorage, nodeStorageOffset, nodeStorageSize);
    using var entryStorage = new ValueSubStorage(tableStorage, entryStorageOffset, entryStorageSize);
    return Initialize(allocator, in nodeStorage, in entryStorage, header.EntryCount);
}
/// <summary>
/// Initializes the storage's entry table directly from separate node and entry storages.
/// </summary>
/// <param name="allocator">The <see cref="MemoryResource"/> used for the entry table's buffers.</param>
/// <param name="nodeStorage">The storage containing the table's nodes.</param>
/// <param name="entryStorage">The storage containing the table's entries.</param>
/// <param name="entryCount">The number of entries in the table.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
public Result Initialize(MemoryResource allocator, in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage,
    int entryCount)
{
    return _table.Initialize(allocator, in nodeStorage, in entryStorage, NodeSize, Unsafe.SizeOf<Entry>(),
        entryCount);
}
/// <summary>
/// Finalizes the entry table and clears every data storage slot, returning this object to an
/// uninitialized state. Does nothing if the object is not initialized.
/// </summary>
public void FinalizeObject()
{
    if (IsInitialized())
    {
        _table.FinalizeObject();
        Span<ValueSubStorage> storages = _dataStorage.Items;
        for (int i = 0; i < storages.Length; i++)
        {
            // Overwrite each slot with an empty ValueSubStorage, releasing whatever it referenced.
            using var emptySubStorage = new ValueSubStorage();
            storages[i].Set(in emptySubStorage);
        }
    }
}
protected override Result DoRead(long offset, Span<byte> destination)
{
// Validate pre-conditions
Assert.SdkRequiresLessEqual(0, offset);
@ -160,23 +179,24 @@ public class IndirectStorage : IStorage
if (destination.Length == 0)
return Result.Success;
// Pin and recreate the span because C# can't use byref-like types in a closure
int bufferSize = destination.Length;
fixed (byte* pBuffer = destination)
var closure = new OperatePerEntryClosure();
closure.OutBuffer = destination;
closure.Offset = offset;
Result rc = OperatePerEntry(offset, destination.Length, ReadImpl, ref closure, enableContinuousReading: true,
verifyEntryRanges: true);
if (rc.IsFailure()) return rc.Miss();
return Result.Success;
static Result ReadImpl(ref ValueSubStorage storage, long physicalOffset, long virtualOffset, long processSize,
ref OperatePerEntryClosure closure)
{
// Copy the pointer to workaround CS1764.
// OperatePerEntry won't store the delegate anywhere, so it should be safe
byte* pBuffer2 = pBuffer;
int bufferPosition = (int)(virtualOffset - closure.Offset);
Result rc = storage.Read(physicalOffset, closure.OutBuffer.Slice(bufferPosition, (int)processSize));
if (rc.IsFailure()) return rc.Miss();
Result Operate(IStorage storage, long dataOffset, long currentOffset, long currentSize)
{
var buffer = new Span<byte>(pBuffer2, bufferSize);
return storage.Read(dataOffset,
buffer.Slice((int)(currentOffset - offset), (int)currentSize));
}
return OperatePerEntry(offset, destination.Length, Operate);
return Result.Success;
}
}
@ -190,20 +210,169 @@ public class IndirectStorage : IStorage
return Result.Success;
}
/// <summary>Gets the size of the virtual storage described by the entry table.</summary>
/// <param name="size">On success, contains the entry table's end offset.</param>
/// <returns>The <see cref="Result"/> of the operation.</returns>
protected override Result DoGetSize(out long size)
{
    UnsafeHelpers.SkipParamInit(out size);
    Result rc = _table.GetOffsets(out BucketTree.Offsets offsets);
    if (rc.IsFailure()) return rc.Miss();
    // The virtual storage spans from 0 to the table's end offset.
    size = offsets.EndOffset;
    return Result.Success;
}
/// <summary>Setting the size of this storage is not supported.</summary>
/// <param name="size">Unused.</param>
/// <returns><see cref="ResultFs.UnsupportedSetSizeForIndirectStorage"/>: always fails.</returns>
protected override Result DoSetSize(long size)
{
    return ResultFs.UnsupportedSetSizeForIndirectStorage.Log();
}
protected override Result DoGetSize(out long size)
public Result GetEntryList(Span<Entry> entryBuffer, out int outputEntryCount, long offset, long size)
{
size = Table.GetEnd();
UnsafeHelpers.SkipParamInit(out outputEntryCount);
// Validate pre-conditions
Assert.SdkRequiresLessEqual(0, offset);
Assert.SdkRequiresLessEqual(0, size);
Assert.SdkRequires(IsInitialized());
// Succeed if there's no range
if (size == 0)
{
outputEntryCount = 0;
return Result.Success;
}
// Check that our range is valid
Result rc = _table.GetOffsets(out BucketTree.Offsets offsets);
if (rc.IsFailure()) return rc.Miss();
if (!offsets.IsInclude(offset, size))
return ResultFs.OutOfRange.Log();
// Find the offset in our tree
using var visitor = new BucketTree.Visitor();
rc = _table.Find(ref visitor.Ref, offset);
if (rc.IsFailure()) return rc.Miss();
long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset < 0 || !offsets.IsInclude(entryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
// Prepare to loop over entries
long endOffset = offset + size;
int count = 0;
var currentEntry = visitor.Get<Entry>();
while (currentEntry.GetVirtualOffset() < endOffset)
{
// Try to write the entry to the out list
if (entryBuffer.Length != 0)
{
if (count >= entryBuffer.Length)
break;
entryBuffer[count] = currentEntry;
}
count++;
// Advance
if (!visitor.CanMoveNext())
break;
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
currentEntry = visitor.Get<Entry>();
}
outputEntryCount = count;
return Result.Success;
}
private delegate Result OperateFunc(IStorage storage, long dataOffset, long currentOffset, long currentSize);
protected override Result DoOperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
ReadOnlySpan<byte> inBuffer)
{
Assert.SdkRequiresLessEqual(0, offset);
Assert.SdkRequiresLessEqual(0, size);
Assert.SdkRequires(IsInitialized());
private Result OperatePerEntry(long offset, long size, OperateFunc func)
switch (operationId)
{
case OperationId.InvalidateCache:
if (!_table.IsEmpty())
{
Result rc = _table.InvalidateCache();
if (rc.IsFailure()) return rc.Miss();
for (int i = 0; i < _dataStorage.Items.Length; i++)
{
rc = _dataStorage.Items[i].OperateRange(OperationId.InvalidateCache, 0, long.MaxValue);
if (rc.IsFailure()) return rc.Miss();
}
}
break;
case OperationId.QueryRange:
if (outBuffer.Length != Unsafe.SizeOf<QueryRangeInfo>())
return ResultFs.InvalidArgument.Log();
if (size > 0)
{
Result rc = _table.GetOffsets(out BucketTree.Offsets offsets);
if (rc.IsFailure()) return rc.Miss();
if (!offsets.IsInclude(offset, size))
return ResultFs.OutOfRange.Log();
if (!_table.IsEmpty())
{
var closure = new OperatePerEntryClosure();
closure.OperationId = operationId;
closure.InBuffer = inBuffer;
static Result QueryRangeImpl(ref ValueSubStorage storage, long physicalOffset,
long virtualOffset, long processSize, ref OperatePerEntryClosure closure)
{
Unsafe.SkipInit(out QueryRangeInfo currentInfo);
Result rc = storage.OperateRange(SpanHelpers.AsByteSpan(ref currentInfo),
closure.OperationId, physicalOffset, processSize, closure.InBuffer);
if (rc.IsFailure()) return rc.Miss();
closure.InfoMerged.Merge(in currentInfo);
return Result.Success;
}
rc = OperatePerEntry(offset, size, QueryRangeImpl, ref closure, enableContinuousReading: false,
verifyEntryRanges: true);
if (rc.IsFailure()) return rc.Miss();
SpanHelpers.AsByteSpan(ref closure.InfoMerged).CopyTo(outBuffer);
}
}
break;
default:
return ResultFs.UnsupportedOperateRangeForIndirectStorage.Log();
}
return Result.Success;
}
/// <summary>
/// Callback invoked by OperatePerEntry for each entry range that needs processing.
/// </summary>
/// <param name="storage">The data storage the current entry points into.</param>
/// <param name="physicalOffset">The offset in <paramref name="storage"/> to operate on.</param>
/// <param name="virtualOffset">The corresponding offset in the virtual storage.</param>
/// <param name="processSize">The number of bytes to process.</param>
/// <param name="closure">Shared state passed through from the caller.</param>
protected delegate Result OperatePerEntryFunc(ref ValueSubStorage storage, long physicalOffset, long virtualOffset,
    long processSize, ref OperatePerEntryClosure closure);

/// <summary>
/// State passed by reference through OperatePerEntry to its callback.
/// Used because byref-like types such as <see cref="Span{T}"/> cannot be captured in closures.
/// </summary>
protected ref struct OperatePerEntryClosure
{
    public Span<byte> OutBuffer;          // Destination buffer for read callbacks
    public ReadOnlySpan<byte> InBuffer;   // Input buffer for OperateRange callbacks
    public long Offset;                   // Base virtual offset of the overall operation
    public OperationId OperationId;       // Operation to perform in OperateRange callbacks
    public QueryRangeInfo InfoMerged;     // Query-range info accumulated across entries
}
protected Result OperatePerEntry(long offset, long size, OperatePerEntryFunc func,
ref OperatePerEntryClosure closure, bool enableContinuousReading, bool verifyEntryRanges)
{
// Validate preconditions
Assert.SdkRequiresLessEqual(0, offset);
@ -215,94 +384,146 @@ public class IndirectStorage : IStorage
return Result.Success;
// Validate arguments
if (!Table.Includes(offset, size))
Result rc = _table.GetOffsets(out BucketTree.Offsets offsets);
if (rc.IsFailure()) return rc.Miss();
if (!offsets.IsInclude(offset, size))
return ResultFs.OutOfRange.Log();
// Find the offset in our tree
var visitor = new BucketTree.Visitor();
try
{
Result rc = Table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;
rc = _table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;
long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset < 0 || !Table.Includes(entryOffset))
long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset < 0 || !offsets.IsInclude(entryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
// Prepare to operate in chunks
long currentOffset = offset;
long endOffset = offset + size;
var continuousReading = new BucketTree.ContinuousReadingInfo();
while (currentOffset < endOffset)
{
// Get the current entry
var currentEntry = visitor.Get<Entry>();
// Get and validate the entry's offset
long currentEntryOffset = currentEntry.GetVirtualOffset();
if (currentEntryOffset > currentOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();
// Validate the storage index
if (currentEntry.StorageIndex < 0 || currentEntry.StorageIndex >= StorageCount)
return ResultFs.InvalidIndirectEntryStorageIndex.Log();
// Prepare to operate in chunks
long currentOffset = offset;
long endOffset = offset + size;
while (currentOffset < endOffset)
if (enableContinuousReading)
{
// Get the current entry
var currentEntry = visitor.Get<Entry>();
// Get and validate the entry's offset
long currentEntryOffset = currentEntry.GetVirtualOffset();
if (currentEntryOffset > currentOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();
// Validate the storage index
if (currentEntry.StorageIndex < 0 || currentEntry.StorageIndex >= StorageCount)
return ResultFs.InvalidIndirectEntryStorageIndex.Log();
// todo: Implement continuous reading
// Get and validate the next entry offset
long nextEntryOffset;
if (visitor.CanMoveNext())
if (continuousReading.CheckNeedScan())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
nextEntryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (!Table.Includes(nextEntryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
}
else
{
nextEntryOffset = Table.GetEnd();
rc = visitor.ScanContinuousReading<ContinuousReadingEntry>(out continuousReading, currentOffset,
endOffset - currentOffset);
if (rc.IsFailure()) return rc.Miss();
}
if (currentOffset >= nextEntryOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();
// Get the offset of the entry in the data we read
long dataOffset = currentOffset - currentEntryOffset;
long dataSize = nextEntryOffset - currentEntryOffset - dataOffset;
Assert.SdkLess(0, dataSize);
// Determine how much is left
long remainingSize = endOffset - currentOffset;
long currentSize = Math.Min(remainingSize, dataSize);
Assert.SdkLessEqual(currentSize, size);
if (continuousReading.CanDo())
{
SubStorage currentStorage = DataStorage[currentEntry.StorageIndex];
if (currentEntry.StorageIndex != 0)
return ResultFs.InvalidIndirectStorageIndex.Log();
// Get the current data storage's size.
rc = currentStorage.GetSize(out long currentDataStorageSize);
if (rc.IsFailure()) return rc;
long offsetInEntry = currentOffset - currentEntryOffset;
long entryStorageOffset = currentEntry.GetPhysicalOffset();
long dataStorageOffset = entryStorageOffset + offsetInEntry;
// Ensure that we remain within range.
long currentEntryPhysicalOffset = currentEntry.GetPhysicalOffset();
long continuousReadSize = continuousReading.GetReadSize();
if (currentEntryPhysicalOffset < 0 || currentEntryPhysicalOffset > currentDataStorageSize)
return ResultFs.IndirectStorageCorrupted.Log();
if (verifyEntryRanges)
{
rc = _dataStorage[0].GetSize(out long storageSize);
if (rc.IsFailure()) return rc.Miss();
if (currentDataStorageSize < currentEntryPhysicalOffset + dataOffset + currentSize)
return ResultFs.IndirectStorageCorrupted.Log();
// Ensure that we remain within range
if (entryStorageOffset < 0 || entryStorageOffset > storageSize)
return ResultFs.InvalidIndirectEntryOffset.Log();
rc = func(currentStorage, currentEntryPhysicalOffset + dataOffset, currentOffset, currentSize);
if (rc.IsFailure()) return rc;
if (dataStorageOffset + continuousReadSize > storageSize)
return ResultFs.InvalidIndirectStorageSize.Log();
}
rc = func(ref _dataStorage[0], dataStorageOffset, currentOffset, continuousReadSize, ref closure);
if (rc.IsFailure()) return rc.Miss();
continuousReading.Done();
}
currentOffset += currentSize;
}
// Get and validate the next entry offset
long nextEntryOffset;
if (visitor.CanMoveNext())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;
nextEntryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (!offsets.IsInclude(nextEntryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
}
else
{
nextEntryOffset = offsets.EndOffset;
}
if (currentOffset >= nextEntryOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();
// Get the offset of the data we need in the entry
long dataOffsetInEntry = currentOffset - currentEntryOffset;
long dataSize = nextEntryOffset - currentEntryOffset - dataOffsetInEntry;
Assert.SdkLess(0, dataSize);
// Determine how much is left
long remainingSize = endOffset - currentOffset;
long processSize = Math.Min(remainingSize, dataSize);
Assert.SdkLessEqual(processSize, size);
// Operate, if we need to
bool needsOperate;
if (!enableContinuousReading)
{
needsOperate = true;
}
else
{
needsOperate = !continuousReading.IsDone() || currentEntry.StorageIndex != 0;
}
if (needsOperate)
{
long entryStorageOffset = currentEntry.GetPhysicalOffset();
long dataStorageOffset = entryStorageOffset + dataOffsetInEntry;
if (verifyEntryRanges)
{
rc = _dataStorage[currentEntry.StorageIndex].GetSize(out long storageSize);
if (rc.IsFailure()) return rc.Miss();
// Ensure that we remain within range
if (entryStorageOffset < 0 || entryStorageOffset > storageSize)
return ResultFs.IndirectStorageCorrupted.Log();
if (dataStorageOffset + processSize > storageSize)
return ResultFs.IndirectStorageCorrupted.Log();
}
rc = func(ref _dataStorage[currentEntry.StorageIndex], dataStorageOffset, currentOffset, processSize,
ref closure);
if (rc.IsFailure()) return rc.Miss();
}
currentOffset += processSize;
}
finally { visitor.Dispose(); }
return Result.Success;
}

View File

@ -293,11 +293,11 @@ public class Nca
var relocationTableStorage = new SubStorage(patchStorage, patchInfo.RelocationTreeOffset, patchInfo.RelocationTreeSize);
var cachedTableStorage = new CachedStorage(relocationTableStorage, IndirectStorage.NodeSize, 4, true);
var tableNodeStorage = new SubStorage(cachedTableStorage, 0, nodeStorageSize);
var tableEntryStorage = new SubStorage(cachedTableStorage, nodeStorageSize, entryStorageSize);
using var tableNodeStorage = new ValueSubStorage(cachedTableStorage, 0, nodeStorageSize);
using var tableEntryStorage = new ValueSubStorage(cachedTableStorage, nodeStorageSize, entryStorageSize);
var storage = new IndirectStorage();
storage.Initialize(tableNodeStorage, tableEntryStorage, treeHeader.EntryCount).ThrowIfFailure();
storage.Initialize(new ArrayPoolMemoryResource(), in tableNodeStorage, in tableEntryStorage, treeHeader.EntryCount).ThrowIfFailure();
storage.SetStorage(0, baseStorage, 0, baseSize);
storage.SetStorage(1, patchStorage, 0, patchSize);

View File

@ -0,0 +1,132 @@
using System;
using LibHac.Diag;
using LibHac.Fs;
namespace LibHac.FsSystem;
/// <summary>
/// Represents a sparse <see cref="IStorage"/> where blocks of empty data containing all
/// zeros are not written to disk in order to save space.
/// </summary>
/// <remarks><para>The <see cref="SparseStorage"/>'s <see cref="BucketTree"/> contains <see cref="IndirectStorage.Entry"/>
/// values describing which portions of the storage are empty. This is accomplished by using a standard
/// <see cref="IndirectStorage"/> where the second <see cref="IStorage"/> contains only zeros.</para>
/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
public class SparseStorage : IndirectStorage
{
    /// <summary>
    /// An <see cref="IStorage"/> that reads as all zeros. Used as the second data storage of the
    /// underlying <see cref="IndirectStorage"/> so that sparse regions produce zero-filled data.
    /// </summary>
    private class ZeroStorage : IStorage
    {
        /// <summary>Fills <paramref name="destination"/> with zeros.</summary>
        protected override Result DoRead(long offset, Span<byte> destination)
        {
            Assert.SdkRequiresGreaterEqual(offset, 0);

            if (destination.Length > 0)
                destination.Clear();

            return Result.Success;
        }

        /// <summary>Writing to a <see cref="ZeroStorage"/> is not supported.</summary>
        protected override Result DoWrite(long offset, ReadOnlySpan<byte> source)
        {
            return ResultFs.UnsupportedWriteForZeroStorage.Log();
        }

        protected override Result DoFlush()
        {
            return Result.Success;
        }

        /// <summary>Reports the maximum possible size so that any entry range is in bounds.</summary>
        protected override Result DoGetSize(out long size)
        {
            size = long.MaxValue;
            return Result.Success;
        }

        /// <summary>Resizing a <see cref="ZeroStorage"/> is not supported.</summary>
        protected override Result DoSetSize(long size)
        {
            return ResultFs.UnsupportedSetSizeForZeroStorage.Log();
        }

        /// <summary>Range operations on zeros are no-ops that always succeed.</summary>
        protected override Result DoOperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
            ReadOnlySpan<byte> inBuffer)
        {
            return Result.Success;
        }
    }

    // Only assigned in the constructor, so mark it readonly. Installed at storage index 1
    // by SetZeroStorage so sparse entries resolve to zeros.
    private readonly ZeroStorage _zeroStorage;

    public SparseStorage()
    {
        _zeroStorage = new ZeroStorage();
    }

    public override void Dispose()
    {
        _zeroStorage.Dispose();
        base.Dispose();
    }

    /// <summary>
    /// Initializes the storage as a single, entirely sparse region of the given size.
    /// </summary>
    /// <param name="size">The size of the virtual storage.</param>
    public void Initialize(long size)
    {
        GetEntryTable().Initialize(NodeSize, size);
        SetZeroStorage();
    }

    /// <summary>
    /// Sets the storage that contains the non-sparse portions of the data.
    /// </summary>
    /// <param name="storage">The storage containing the written (non-zero) data.</param>
    public void SetDataStorage(in ValueSubStorage storage)
    {
        Assert.SdkRequires(IsInitialized());

        SetStorage(0, in storage);
        SetZeroStorage();
    }

    // Storage index 1 always resolves to the shared ZeroStorage.
    private void SetZeroStorage()
    {
        SetStorage(1, _zeroStorage, 0, long.MaxValue);
    }

    protected override Result DoRead(long offset, Span<byte> destination)
    {
        // Validate pre-conditions
        Assert.SdkRequiresLessEqual(0, offset);
        Assert.SdkRequires(IsInitialized());

        // Succeed if there's nothing to read
        if (destination.Length == 0)
            return Result.Success;

        if (GetEntryTable().IsEmpty())
        {
            // An empty table means the entire storage is sparse: range-check, then zero-fill.
            Result rc = GetEntryTable().GetOffsets(out BucketTree.Offsets offsets);
            if (rc.IsFailure()) return rc.Miss();

            if (!offsets.IsInclude(offset, destination.Length))
                return ResultFs.OutOfRange.Log();

            destination.Clear();
        }
        else
        {
            var closure = new OperatePerEntryClosure();
            closure.OutBuffer = destination;
            closure.Offset = offset;

            Result rc = OperatePerEntry(offset, destination.Length, ReadImpl, ref closure,
                enableContinuousReading: false, verifyEntryRanges: true);
            if (rc.IsFailure()) return rc.Miss();
        }

        return Result.Success;

        // Reads one entry's data from its backing storage into the matching slice of the output buffer.
        static Result ReadImpl(ref ValueSubStorage storage, long physicalOffset, long virtualOffset, long processSize,
            ref OperatePerEntryClosure closure)
        {
            int bufferPosition = (int)(virtualOffset - closure.Offset);

            Result rc = storage.Read(physicalOffset, closure.OutBuffer.Slice(bufferPosition, (int)processSize));
            if (rc.IsFailure()) return rc.Miss();

            return Result.Success;
        }
    }
}

View File

@ -117,15 +117,15 @@ public class BucketTreeBuilderTests
const int nodeSize = 0x4000;
const int entryCount = 10;
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var builder = new BucketTree2.Builder();
var builder = new BucketTree.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));
@ -141,15 +141,15 @@ public class BucketTreeBuilderTests
const int nodeSize = 0x4000;
const int entryCount = 2;
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var builder = new BucketTree2.Builder();
var builder = new BucketTree.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage, in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));

View File

@ -88,16 +88,16 @@ internal static class BucketTreeCreator
public static BucketTreeTests.BucketTreeData Create(ulong rngSeed, SizeRange entrySizes, int nodeSize, int entryCount)
{
byte[] headerBuffer = new byte[BucketTree2.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree2.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree2.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] headerBuffer = new byte[BucketTree.QueryHeaderStorageSize()];
byte[] nodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
byte[] entryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount)];
using var headerStorage = new ValueSubStorage(new MemoryStorage(headerBuffer), 0, headerBuffer.Length);
using var nodeStorage = new ValueSubStorage(new MemoryStorage(nodeBuffer), 0, nodeBuffer.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(entryBuffer), 0, entryBuffer.Length);
var generator = new EntryGenerator(rngSeed, entrySizes);
var builder = new BucketTree2.Builder();
var builder = new BucketTree.Builder();
Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage,
in entryStorage, nodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entryCount));

View File

@ -60,7 +60,7 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
public byte[] Nodes;
public byte[] Entries;
public BucketTree2 CreateBucketTree()
public BucketTree CreateBucketTree()
{
int entrySize = Unsafe.SizeOf<IndirectStorage.Entry>();
@ -68,7 +68,7 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
using var nodeStorage = new ValueSubStorage(new MemoryStorage(Nodes), 0, Nodes.Length);
using var entryStorage = new ValueSubStorage(new MemoryStorage(Entries), 0, Entries.Length);
var tree = new BucketTree2();
var tree = new BucketTree();
Assert.Success(tree.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, NodeSize, entrySize, header.EntryCount));
return tree;
@ -79,9 +79,9 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
private void MoveNext_IterateAllFromStart_ReturnsCorrectEntries(int treeIndex)
{
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
BucketTree tree = _treeData[treeIndex].CreateBucketTree();
using var visitor = new BucketTree2.Visitor();
using var visitor = new BucketTree.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, 0));
for (int i = 0; i < entries.Length; i++)
@ -118,9 +118,9 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
private void MovePrevious_IterateAllFromEnd_ReturnsCorrectEntries(int treeIndex)
{
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
BucketTree tree = _treeData[treeIndex].CreateBucketTree();
using var visitor = new BucketTree2.Visitor();
using var visitor = new BucketTree.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, entries[^1].GetVirtualOffset()));
for (int i = entries.Length - 1; i >= 0; i--)
@ -158,7 +158,7 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
const int findCount = 10000;
ReadOnlySpan<IndirectStorage.Entry> entries = _entries.AsSpan(0, _treeData[treeIndex].EntryCount);
BucketTree2 tree = _treeData[treeIndex].CreateBucketTree();
BucketTree tree = _treeData[treeIndex].CreateBucketTree();
var random = new Random(123456);
@ -170,7 +170,7 @@ public class BucketTreeTests : IClassFixture<BucketTreeBuffers>
// Add a random shift amount to test finding offsets in the middle of an entry
int offsetShift = random.Next(0, 1) * 0x500;
using var visitor = new BucketTree2.Visitor();
using var visitor = new BucketTree.Visitor();
Assert.Success(tree.Find(ref visitor.Ref, expectedEntry.GetVirtualOffset() + offsetShift));
ref readonly IndirectStorage.Entry actualEntry = ref visitor.Get<IndirectStorage.Entry>();

View File

@ -0,0 +1,154 @@
using System;
using System.Runtime.CompilerServices;
using LibHac.Fs;
using LibHac.FsSystem;
using Xunit;
namespace LibHac.Tests.FsSystem;
/// <summary>
/// Builds deterministic test fixtures for <see cref="IndirectStorage"/>: matching original, patch,
/// sparse-original and patched buffers plus the bucket tree tables describing them.
/// </summary>
internal class IndirectStorageCreator
{
    private const int NodeSize = 0x4000;

    private readonly ulong _rngSeed;
    private readonly long _targetSize;
    private readonly SizeRange _originalEntrySizeRange;
    private readonly SizeRange _patchEntrySizeRange;

    // Computed while sizing the buffers; reused when filling them.
    private int _maxEntrySize;
    private int _entryCount;

    private IndirectStorageTests.IndirectStorageData _buffers;

    /// <summary>
    /// Generates the buffers for one test configuration.
    /// </summary>
    /// <param name="rngSeed">Seed for the deterministic random data.</param>
    /// <param name="originalEntrySizeRange">Size range for entries read from the original storage.</param>
    /// <param name="patchEntrySizeRange">Size range for entries read from the patch storage.</param>
    /// <param name="storageSize">Target size of the generated patched storage.</param>
    public static IndirectStorageTests.IndirectStorageData Create(ulong rngSeed, SizeRange originalEntrySizeRange,
        SizeRange patchEntrySizeRange, long storageSize)
    {
        return new IndirectStorageCreator(rngSeed, originalEntrySizeRange, patchEntrySizeRange, storageSize)._buffers;
    }

    private IndirectStorageCreator(ulong rngSeed, SizeRange originalEntrySizeRange, SizeRange patchEntrySizeRange, long storageSize)
    {
        _rngSeed = rngSeed;
        _originalEntrySizeRange = originalEntrySizeRange;
        _patchEntrySizeRange = patchEntrySizeRange;
        _targetSize = storageSize;

        CreateBuffers();
        FillBuffers();
    }

    // Runs the entry generator until the target size is reached to size all the buffers.
    private void CreateBuffers()
    {
        var generator = new BucketTreeCreator.EntryGenerator(_rngSeed, _originalEntrySizeRange, _patchEntrySizeRange);
        generator.MoveNext();
        _maxEntrySize = 0;

        long originalSize = 0, patchSize = 0, sparseOriginalSize = 0;

        while (generator.PatchedStorageSize < _targetSize)
        {
            _maxEntrySize = Math.Max(_maxEntrySize, generator.CurrentEntrySize);

            originalSize = generator.OriginalStorageSize;
            patchSize = generator.PatchStorageSize;
            sparseOriginalSize = originalSize - patchSize;

            generator.MoveNext();
        }

        _entryCount = generator.CurrentEntryIndex;

        _buffers = new()
        {
            OriginalStorageBuffer = new byte[originalSize],
            SparseOriginalStorageBuffer = new byte[sparseOriginalSize],
            PatchStorageBuffer = new byte[patchSize],
            PatchedStorageBuffer = new byte[originalSize],
            TableEntries = new IndirectStorage.Entry[_entryCount],
            TableHeaderBuffer = new byte[BucketTree.QueryHeaderStorageSize()],
            TableNodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), _entryCount)],
            TableEntryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), _entryCount)],
            SparseTableHeaderBuffer = new byte[BucketTree.QueryHeaderStorageSize()],
            SparseTableNodeBuffer = new byte[(int)BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), _entryCount)],
            SparseTableEntryBuffer = new byte[(int)BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), _entryCount)]
        };
    }

    // Replays the same entry sequence, writing random data into the buffers and building both tables.
    private void FillBuffers()
    {
        byte[] randomBuffer = new byte[_maxEntrySize];
        var generator = new BucketTreeCreator.EntryGenerator(_rngSeed, _originalEntrySizeRange, _patchEntrySizeRange);

        using var headerStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableHeaderBuffer), 0, _buffers.TableHeaderBuffer.Length);
        using var nodeStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableNodeBuffer), 0, _buffers.TableNodeBuffer.Length);
        using var entryStorage = new ValueSubStorage(new MemoryStorage(_buffers.TableEntryBuffer), 0, _buffers.TableEntryBuffer.Length);

        using var sparseHeaderStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableHeaderBuffer), 0, _buffers.SparseTableHeaderBuffer.Length);
        using var sparseNodeStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableNodeBuffer), 0, _buffers.SparseTableNodeBuffer.Length);
        using var sparseEntryStorage = new ValueSubStorage(new MemoryStorage(_buffers.SparseTableEntryBuffer), 0, _buffers.SparseTableEntryBuffer.Length);

        var builder = new BucketTree.Builder();
        var sparseTableBuilder = new BucketTree.Builder();

        Assert.Success(builder.Initialize(new ArrayPoolMemoryResource(), in headerStorage, in nodeStorage,
            in entryStorage, NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), _entryCount));

        Assert.Success(sparseTableBuilder.Initialize(new ArrayPoolMemoryResource(), in sparseHeaderStorage,
            in sparseNodeStorage, in sparseEntryStorage, NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(),
            _entryCount));

        var random = new Random(_rngSeed);

        int originalStorageOffset = 0;
        int sparseOriginalStorageOffset = 0;
        int patchStorageOffset = 0;
        int patchedStorageOffset = 0;

        for (int i = 0; i < _entryCount; i++)
        {
            generator.MoveNext();
            IndirectStorage.Entry entry = generator.CurrentEntry;

            // The sparse table points original-storage entries at the compacted sparse buffer.
            IndirectStorage.Entry sparseEntry = generator.CurrentEntry;
            sparseEntry.SetPhysicalOffset(sparseOriginalStorageOffset);

            Assert.Success(builder.Add(in entry));
            Assert.Success(sparseTableBuilder.Add(in sparseEntry));
            _buffers.TableEntries[i] = entry;

            Span<byte> randomData = randomBuffer.AsSpan(0, generator.CurrentEntrySize);
            random.NextBytes(randomData);

            if (entry.StorageIndex == 0)
            {
                randomData.CopyTo(_buffers.OriginalStorageBuffer.AsSpan(originalStorageOffset));
                randomData.CopyTo(_buffers.SparseOriginalStorageBuffer.AsSpan(sparseOriginalStorageOffset));
                randomData.CopyTo(_buffers.PatchedStorageBuffer.AsSpan(patchedStorageOffset));

                originalStorageOffset += randomData.Length;
                sparseOriginalStorageOffset += randomData.Length;
                patchedStorageOffset += randomData.Length;
            }
            else
            {
                // Fill the unused portions of the original storage with zeros so it matches the sparse original storage.
                // Previously the AsSpan(...) result was discarded, making this a no-op statement; the buffer is freshly
                // allocated and already zeroed, but clear explicitly so the stated intent actually executes.
                _buffers.OriginalStorageBuffer.AsSpan(originalStorageOffset, generator.CurrentEntrySize).Clear();

                randomData.CopyTo(_buffers.PatchStorageBuffer.AsSpan(patchStorageOffset));
                randomData.CopyTo(_buffers.PatchedStorageBuffer.AsSpan(patchedStorageOffset));

                originalStorageOffset += randomData.Length;
                patchStorageOffset += randomData.Length;
                patchedStorageOffset += randomData.Length;
            }
        }

        Assert.Success(builder.Finalize(generator.PatchedStorageSize));
        Assert.Success(sparseTableBuilder.Finalize(generator.PatchedStorageSize));

        // Every buffer must have been filled exactly to its end.
        Assert.Equal(_buffers.OriginalStorageBuffer.Length, originalStorageOffset);
        Assert.Equal(_buffers.SparseOriginalStorageBuffer.Length, sparseOriginalStorageOffset);
        Assert.Equal(_buffers.PatchStorageBuffer.Length, patchStorageOffset);
        Assert.Equal(_buffers.PatchedStorageBuffer.Length, patchedStorageOffset);
    }
}

View File

@ -0,0 +1,356 @@
using System;
using System.Linq;
using System.Runtime.InteropServices;
using LibHac.Fs;
using LibHac.FsSystem;
using LibHac.Tests.Common;
using LibHac.Tests.Fs;
using Xunit;
namespace LibHac.Tests.FsSystem;
/// <summary>
/// xUnit class fixture that generates the indirect storage test data a single time and
/// shares it across every test in <see cref="IndirectStorageTests"/>.
/// </summary>
public class IndirectStorageBuffers
{
    public IndirectStorageTests.IndirectStorageData[] Buffers { get; }

    public IndirectStorageBuffers()
    {
        IndirectStorageTests.IndirectStorageTestConfig[] configs = IndirectStorageTests.IndirectStorageTestData;
        var generatedBuffers = new IndirectStorageTests.IndirectStorageData[configs.Length];

        for (int index = 0; index < configs.Length; index++)
        {
            IndirectStorageTests.IndirectStorageTestConfig current = configs[index];

            // A blank patch size range (block size 0) means "reuse the original entry size range".
            SizeRange patchRange = current.PatchEntrySizeRange.BlockSize == 0
                ? current.OriginalEntrySizeRange
                : current.PatchEntrySizeRange;

            generatedBuffers[index] = IndirectStorageCreator.Create(current.RngSeed,
                current.OriginalEntrySizeRange, patchRange, current.StorageSize);
        }

        Buffers = generatedBuffers;
    }
}
public class IndirectStorageTests : IClassFixture<IndirectStorageBuffers>
{
    // Keep the generated data between tests so it only has to be generated once
    private readonly IndirectStorageData[] _storageBuffers;

    public IndirectStorageTests(IndirectStorageBuffers buffers)
    {
        _storageBuffers = buffers.Buffers;
    }

    /// <summary>Parameters used to generate one set of indirect storage test data.</summary>
    public class IndirectStorageTestConfig
    {
        public ulong RngSeed { get; init; }
        public long StorageSize { get; init; }

        // If the patch size range is left blank, the same values will be used for both the original and patch entry sizes
        public SizeRange OriginalEntrySizeRange { get; init; }
        public SizeRange PatchEntrySizeRange { get; init; }
    }

    /// <summary>Parameters controlling a single <see cref="StorageTester"/> random-access run.</summary>
    private class RandomAccessTestConfig
    {
        public int[] SizeClassProbs { get; init; }
        public int[] SizeClassMaxSizes { get; init; }
        public int[] TaskProbs { get; init; }
        public int[] AccessTypeProbs { get; init; }
        public ulong RngSeed { get; init; }
        public int FrequentAccessBlockCount { get; init; }
    }

    public static readonly IndirectStorageTestConfig[] IndirectStorageTestData =
    {
        // Small patched regions to force continuous reading
        new()
        {
            RngSeed = 948285,
            OriginalEntrySizeRange = new SizeRange(0x10000, 1, 5),
            PatchEntrySizeRange = new SizeRange(1, 0x20, 0xFFF),
            StorageSize = 1024 * 1024 * 10
        },
        // Small patch regions
        new()
        {
            RngSeed = 236956,
            OriginalEntrySizeRange = new SizeRange(0x1000, 1, 10),
            StorageSize = 1024 * 1024 * 10
        },
        // Medium patch regions
        new()
        {
            RngSeed = 352174,
            OriginalEntrySizeRange = new SizeRange(0x8000, 1, 10),
            StorageSize = 1024 * 1024 * 10
        },
        // Larger patch regions
        new()
        {
            RngSeed = 220754,
            OriginalEntrySizeRange = new SizeRange(0x10000, 10, 50),
            StorageSize = 1024 * 1024 * 10
        }
    };

    private static readonly RandomAccessTestConfig[] AccessTestConfigs =
    {
        new()
        {
            SizeClassProbs = new[] { 50, 50, 5 },
            SizeClassMaxSizes = new[] { 0x4000, 0x80000, 0x800000 }, // 16 KB, 512 KB, 8 MB
            TaskProbs = new[] { 1, 0, 0 }, // Read, Write, Flush
            AccessTypeProbs = new[] { 10, 10, 5 }, // Random, Sequential, Frequent block
            RngSeed = 35467,
            FrequentAccessBlockCount = 6,
        },
        new()
        {
            SizeClassProbs = new[] { 50, 50, 5 },
            SizeClassMaxSizes = new[] { 0x800, 0x1000, 0x8000 }, // 2 KB, 4 KB, 32 KB
            TaskProbs = new[] { 1, 0, 0 }, // Read, Write, Flush
            AccessTypeProbs = new[] { 1, 10, 0 }, // Random, Sequential, Frequent block
            RngSeed = 13579
        },
    };

    public static readonly TheoryData<int> IndirectStorageTestTheoryData =
        TheoryDataCreator.CreateSequence(0, IndirectStorageTestData.Length);

    /// <summary>
    /// Holds the generated buffers for one test configuration: the bucket tree table buffers
    /// for both the indirect and sparse storages, and the raw original/patch/patched data.
    /// </summary>
    public class IndirectStorageData
    {
        public IndirectStorage.Entry[] TableEntries;
        public byte[] TableHeaderBuffer;
        public byte[] TableNodeBuffer;
        public byte[] TableEntryBuffer;
        public byte[] SparseTableHeaderBuffer;
        public byte[] SparseTableNodeBuffer;
        public byte[] SparseTableEntryBuffer;
        public byte[] OriginalStorageBuffer;
        public byte[] SparseOriginalStorageBuffer;
        public byte[] PatchStorageBuffer;
        public byte[] PatchedStorageBuffer;

        /// <summary>
        /// Builds an <see cref="IndirectStorage"/> from the generated table and data buffers.
        /// </summary>
        /// <param name="useSparseOriginalStorage">When true, a <see cref="SparseStorage"/> is
        /// used as the original storage instead of a plain <see cref="MemoryStorage"/>.</param>
        public IndirectStorage CreateIndirectStorage(bool useSparseOriginalStorage)
        {
            BucketTree.Header header = MemoryMarshal.Cast<byte, BucketTree.Header>(TableHeaderBuffer)[0];

            using var nodeStorage = new ValueSubStorage(new MemoryStorage(TableNodeBuffer), 0, TableNodeBuffer.Length);
            using var entryStorage = new ValueSubStorage(new MemoryStorage(TableEntryBuffer), 0, TableEntryBuffer.Length);

            // The sparse storage exposes the same virtual size as the plain original storage,
            // so the sub storage length is OriginalStorageBuffer.Length in both cases.
            IStorage originalStorageBase = useSparseOriginalStorage ? CreateSparseStorage() : new MemoryStorage(OriginalStorageBuffer);

            using var originalStorage = new ValueSubStorage(originalStorageBase, 0, OriginalStorageBuffer.Length);
            using var patchStorage = new ValueSubStorage(new MemoryStorage(PatchStorageBuffer), 0, PatchStorageBuffer.Length);

            var storage = new IndirectStorage();
            Assert.Success(storage.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, header.EntryCount));
            storage.SetStorage(0, in originalStorage);
            storage.SetStorage(1, in patchStorage);

            return storage;
        }

        /// <summary>
        /// Builds a <see cref="SparseStorage"/> from the generated sparse table and data buffers.
        /// </summary>
        public SparseStorage CreateSparseStorage()
        {
            BucketTree.Header header = MemoryMarshal.Cast<byte, BucketTree.Header>(SparseTableHeaderBuffer)[0];

            using var nodeStorage = new ValueSubStorage(new MemoryStorage(SparseTableNodeBuffer), 0, SparseTableNodeBuffer.Length);
            using var entryStorage = new ValueSubStorage(new MemoryStorage(SparseTableEntryBuffer), 0, SparseTableEntryBuffer.Length);
            using var sparseOriginalStorage = new ValueSubStorage(new MemoryStorage(SparseOriginalStorageBuffer), 0, SparseOriginalStorageBuffer.Length);

            var sparseStorage = new SparseStorage();
            Assert.Success(sparseStorage.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, header.EntryCount));
            sparseStorage.SetDataStorage(in sparseOriginalStorage);

            return sparseStorage;
        }
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void Read_EntireStorageInSingleRead_DataIsCorrect(int index)
    {
        ReadEntireStorageImpl(index, false);
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void Read_EntireStorageInSingleRead_OriginalStorageIsSparse_DataIsCorrect(int index)
    {
        ReadEntireStorageImpl(index, true);
    }

    // Reads the entire indirect storage in one call and compares it against the expected patched data.
    private void ReadEntireStorageImpl(int index, bool useSparseOriginalStorage)
    {
        using IndirectStorage storage = _storageBuffers[index].CreateIndirectStorage(useSparseOriginalStorage);

        byte[] expectedPatchedData = _storageBuffers[index].PatchedStorageBuffer;
        byte[] actualPatchedData = new byte[expectedPatchedData.Length];

        Assert.Success(storage.GetSize(out long storageSize));
        Assert.Equal(actualPatchedData.Length, storageSize);

        Assert.Success(storage.Read(0, actualPatchedData));
        Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData));
    }

    [Fact]
    public void Initialize_SingleTableStorage()
    {
        const int index = 1;
        IndirectStorageData buffers = _storageBuffers[index];

        // Initialize from a single storage containing header + node + entry buffers back to back.
        byte[] tableBuffer = buffers.TableHeaderBuffer.Concat(buffers.TableNodeBuffer.Concat(buffers.TableEntryBuffer)).ToArray();

        using var tableStorage = new ValueSubStorage(new MemoryStorage(tableBuffer), 0, tableBuffer.Length);
        using var originalStorage = new ValueSubStorage(new MemoryStorage(buffers.OriginalStorageBuffer), 0, buffers.OriginalStorageBuffer.Length);
        using var patchStorage = new ValueSubStorage(new MemoryStorage(buffers.PatchStorageBuffer), 0, buffers.PatchStorageBuffer.Length);

        using var storage = new IndirectStorage();
        Assert.Success(storage.Initialize(new ArrayPoolMemoryResource(), in tableStorage));
        storage.SetStorage(0, in originalStorage);
        storage.SetStorage(1, in patchStorage);

        byte[] expectedPatchedData = _storageBuffers[index].PatchedStorageBuffer;
        byte[] actualPatchedData = new byte[expectedPatchedData.Length];

        Assert.Success(storage.GetSize(out long storageSize));
        Assert.Equal(actualPatchedData.Length, storageSize);

        Assert.Success(storage.Read(0, actualPatchedData));
        Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData));
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void Read_RandomAccess_DataIsCorrect(int index)
    {
        foreach (RandomAccessTestConfig accessConfig in AccessTestConfigs)
        {
            // This test targets IndirectStorage, so pass false for getSparseStorage.
            // Passing true would test SparseStorage and duplicate
            // SparseStorage_Read_RandomAccess_DataIsCorrect.
            StorageTester tester = SetupRandomAccessTest(index, accessConfig, false);
            tester.Run(0x1000);
        }
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void GetEntryList_GetAllEntries_ReturnsCorrectEntries(int index)
    {
        GetEntryListTestImpl(index, 0, _storageBuffers[index].PatchedStorageBuffer.Length);
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void GetEntryList_GetPartialEntries_ReturnsCorrectEntries(int index)
    {
        IndirectStorageData buffers = _storageBuffers[index];
        var random = new Random(IndirectStorageTestData[index].RngSeed);

        int endOffset = buffers.PatchedStorageBuffer.Length;
        int maxSize = endOffset / 2;
        const int testCount = 100;

        for (int i = 0; i < testCount; i++)
        {
            long offset = random.Next(0, endOffset);

            // Clamp the random size to at least 1 byte: GetEntryListTestImpl requires a
            // non-empty range, and the RNG may return 0.
            long size = Math.Min(endOffset - offset, Math.Max(1, random.Next(0, maxSize)));
            GetEntryListTestImpl(index, offset, size);
        }

        GetEntryListTestImpl(index, 0, _storageBuffers[index].PatchedStorageBuffer.Length);
    }

    // Checks that GetEntryList returns exactly the table entries overlapping [offset, offset + size).
    private void GetEntryListTestImpl(int index, long offset, long size)
    {
        Assert.True(size > 0);

        IndirectStorageData buffers = _storageBuffers[index];
        using IndirectStorage storage = buffers.CreateIndirectStorage(false);
        IndirectStorage.Entry[] entries = buffers.TableEntries;
        int endOffset = buffers.PatchedStorageBuffer.Length;

        int startIndex = FindEntry(entries, offset, endOffset);
        int endIndex = FindEntry(entries, offset + size - 1, endOffset);
        int count = endIndex - startIndex + 1;

        Span<IndirectStorage.Entry> expectedEntries = buffers.TableEntries.AsSpan(startIndex, count);

        // Oversize the output buffer by one so any extra returned entries are detected.
        var actualEntries = new IndirectStorage.Entry[expectedEntries.Length + 1];
        Assert.Success(storage.GetEntryList(actualEntries, out int entryCount, offset, size));

        Assert.Equal(expectedEntries.Length, entryCount);
        Assert.True(actualEntries.AsSpan(0, entryCount).SequenceEqual(expectedEntries));
    }

    // Returns the index of the table entry whose virtual range contains the given offset.
    private int FindEntry(IndirectStorage.Entry[] entries, long offset, long endOffset)
    {
        Assert.True(offset >= 0);
        Assert.True(offset < endOffset);

        for (int i = 0; i + 1 < entries.Length; i++)
        {
            if (offset >= entries[i].GetVirtualOffset() && offset < entries[i + 1].GetVirtualOffset())
                return i;
        }

        // Past the start of the last entry: the last entry covers everything up to endOffset.
        return entries.Length - 1;
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void SparseStorage_Read_EntireStorageInSingleRead_DataIsCorrect(int index)
    {
        IndirectStorageData buffers = _storageBuffers[index];
        using SparseStorage storage = buffers.CreateSparseStorage();

        byte[] expectedPatchedData = buffers.OriginalStorageBuffer;
        byte[] actualPatchedData = new byte[expectedPatchedData.Length];

        Assert.Success(storage.GetSize(out long storageSize));
        Assert.Equal(actualPatchedData.Length, storageSize);

        Assert.Success(storage.Read(0, actualPatchedData));
        Assert.True(expectedPatchedData.SequenceEqual(actualPatchedData));
    }

    [Theory, MemberData(nameof(IndirectStorageTestTheoryData))]
    public void SparseStorage_Read_RandomAccess_DataIsCorrect(int index)
    {
        foreach (RandomAccessTestConfig accessConfig in AccessTestConfigs)
        {
            StorageTester tester = SetupRandomAccessTest(index, accessConfig, true);
            tester.Run(0x1000);
        }
    }

    // Builds a StorageTester comparing the storage under test against an in-memory copy of its
    // expected contents.
    private StorageTester SetupRandomAccessTest(int storageConfigIndex, RandomAccessTestConfig accessConfig, bool getSparseStorage)
    {
        IStorage indirectStorage = getSparseStorage
            ? _storageBuffers[storageConfigIndex].CreateSparseStorage()
            : _storageBuffers[storageConfigIndex].CreateIndirectStorage(false);

        Assert.Success(indirectStorage.GetSize(out long storageSize));

        byte[] expectedStorageArray = new byte[storageSize];
        Assert.Success(indirectStorage.Read(0, expectedStorageArray));

        var memoryStorage = new MemoryStorage(expectedStorageArray);

        var memoryStorageEntry = new StorageTester.Entry(memoryStorage, expectedStorageArray);
        var indirectStorageEntry = new StorageTester.Entry(indirectStorage, expectedStorageArray);

        var testerConfig = new StorageTester.Configuration()
        {
            Entries = new[] { memoryStorageEntry, indirectStorageEntry },
            SizeClassProbs = accessConfig.SizeClassProbs,
            SizeClassMaxSizes = accessConfig.SizeClassMaxSizes,
            TaskProbs = accessConfig.TaskProbs,
            AccessTypeProbs = accessConfig.AccessTypeProbs,
            RngSeed = accessConfig.RngSeed,
            FrequentAccessBlockCount = accessConfig.FrequentAccessBlockCount
        };

        return new StorageTester(testerConfig);
    }
}