diff --git a/build/CodeGen/results.csv b/build/CodeGen/results.csv
index f6d4354f..bb1ddbd3 100644
--- a/build/CodeGen/results.csv
+++ b/build/CodeGen/results.csv
@@ -109,6 +109,7 @@ Module,DescriptionStart,DescriptionEnd,Name,Summary
2,3383,,AllocationFailureInAesXtsFileE,In Initialize
2,3394,,AllocationFailureInEncryptedFileSystemCreatorA,In Create allocating AesXtsFileSystem
2,3407,,AllocationFailureInFileSystemInterfaceAdapter, In OpenFile or OpenDirectory
+2,3411,,AllocationFailureInBufferedStorageA, In Initialize allocating Cache array
2,3420,,AllocationFailureInNew,
2,3421,,AllocationFailureInCreateShared,
2,3422,,AllocationFailureInMakeUnique,
diff --git a/src/LibHac/Common/Ref.cs b/src/LibHac/Common/Ref.cs
new file mode 100644
index 00000000..dc0a9614
--- /dev/null
+++ b/src/LibHac/Common/Ref.cs
@@ -0,0 +1,72 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace LibHac.Common
+{
+ /// <summary>
+ /// A <see langword="ref"/> <see langword="struct"/> that can store a reference to a value of a specified type.
+ /// </summary>
+ /// <typeparam name="T">The type of value to reference.</typeparam>
+ public readonly ref struct Ref<T>
+ {
+ /// <summary>
+ /// The 1-length <see cref="Span{T}"/> instance used to track the target <typeparamref name="T"/> value.
+ /// </summary>
+ private readonly Span<T> _span;
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="Ref{T}"/> struct.
+ /// </summary>
+ /// <param name="value">The reference to the target <typeparamref name="T"/> value.</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public Ref(ref T value)
+ {
+ _span = MemoryMarshal.CreateSpan(ref value, 1);
+ }
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="Ref{T}"/> struct.
+ /// </summary>
+ /// <param name="pointer">The pointer to the target value.</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public unsafe Ref(void* pointer)
+ : this(ref Unsafe.AsRef<T>(pointer))
+ {
+ }
+
+ /// <summary>
+ /// Gets the <typeparamref name="T"/> reference represented by the current <see cref="Ref{T}"/> instance.
+ /// </summary>
+ public ref T Value
+ {
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ get => ref MemoryMarshal.GetReference(_span);
+ }
+
+ /// <summary>
+ /// Returns a value that indicates whether the current <see cref="Ref{T}"/> is <see langword="null"/>.
+ /// </summary>
+ /// <returns><see langword="true"/> if the held reference is <see langword="null"/>;
+ /// otherwise <see langword="false"/>.</returns>
+ public bool IsNull
+ {
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ get => Unsafe.IsNullRef(ref Value);
+ }
+
+ /// <summary>
+ /// Implicitly gets the <typeparamref name="T"/> value from a given <see cref="Ref{T}"/> instance.
+ /// </summary>
+ /// <param name="reference">The input <see cref="Ref{T}"/> instance.</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static implicit operator T(Ref<T> reference)
+ {
+ return reference.Value;
+ }
+ }
+}
\ No newline at end of file
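
Usage sketch (illustrative, not part of the patch): how Ref<T> is expected to behave based on the members above. The local variable names are hypothetical.

    int value = 1;
    var reference = new Ref<int>(ref value);

    reference.Value = 42;            // Writes through the stored reference; 'value' is now 42.
    bool isNull = reference.IsNull;  // False: the reference targets a live local.
    int copy = reference;            // Implicit conversion reads the referenced value (42).
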
diff --git a/src/LibHac/Diag/Assert.cs b/src/LibHac/Diag/Assert.cs
index d89994f6..5bf60a52 100644
--- a/src/LibHac/Diag/Assert.cs
+++ b/src/LibHac/Diag/Assert.cs
@@ -20,6 +20,20 @@ namespace LibHac.Diag
throw new LibHacException($"Assertion failed: {message}");
}
+ [Conditional("DEBUG")]
+ public static void False([DoesNotReturnIf(true)] bool condition, string message = null)
+ {
+ if (!condition)
+ return;
+
+ if (string.IsNullOrWhiteSpace(message))
+ {
+ throw new LibHacException("Assertion failed.");
+ }
+
+ throw new LibHacException($"Assertion failed: {message}");
+ }
+
[Conditional("DEBUG")]
public static void Null<T>([NotNull] T item) where T : class
{
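
A quick sketch of how the new Assert.False complements the existing Assert.True (illustrative; 'blockSize' and 'buffer' are hypothetical). Both are compiled out of non-DEBUG builds by [Conditional("DEBUG")].

    Assert.True(blockSize > 0, "blockSize must be positive");  // Throws LibHacException when the condition is false.
    Assert.False(buffer.IsNull, "buffer must not be null");    // Throws LibHacException when the condition is true.
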
diff --git a/src/LibHac/Fs/Buffers/IBufferManager.cs b/src/LibHac/Fs/Buffers/IBufferManager.cs
index 17fcb32f..4775060d 100644
--- a/src/LibHac/Fs/Buffers/IBufferManager.cs
+++ b/src/LibHac/Fs/Buffers/IBufferManager.cs
@@ -6,6 +6,14 @@ using CacheHandle = System.Int64;
namespace LibHac.Fs
{
// ReSharper disable once InconsistentNaming
+ /// <summary>
+ /// Handles buffer allocation, deallocation, and caching.
+ /// An allocated buffer may be placed in the cache using <see cref="RegisterCache"/>.
+ /// Caching a buffer saves the buffer for later retrieval, but tells the buffer manager that it can deallocate the
+ /// buffer if the memory is needed elsewhere. Any cached buffer may be evicted from the cache if there is no free
+ /// space for a requested allocation or if the cache is full when caching a new buffer.
+ /// A cached buffer can be retrieved using <see cref="AcquireCache"/>.
+ /// </summary>
public abstract class IBufferManager : IDisposable
{
public readonly struct BufferAttribute
@@ -23,18 +31,80 @@ namespace LibHac.Fs
public Buffer AllocateBuffer(int size, BufferAttribute attribute) =>
DoAllocateBuffer(size, attribute);
+ /// <summary>
+ /// Allocates a new buffer with an attribute of level 0.
+ /// </summary>
+ /// <param name="size">The minimum size of the buffer to allocate.</param>
+ /// <returns>The allocated <see cref="Buffer"/> if successful. Otherwise a null <see cref="Buffer"/>.</returns>
+ public Buffer AllocateBuffer(int size) => DoAllocateBuffer(size, new BufferAttribute());
+
+ /// <summary>
+ /// Deallocates the provided <see cref="Buffer"/>.
+ /// </summary>
+ /// <param name="buffer">The <see cref="Buffer"/> to deallocate.</param>
public void DeallocateBuffer(Buffer buffer) => DoDeallocateBuffer(buffer);
+ /// <summary>
+ /// Adds a <see cref="Buffer"/> to the cache.
+ /// The buffer must have been allocated from this <see cref="IBufferManager"/>.
+ /// The buffer must not be used after adding it to the cache.
+ /// </summary>
+ /// <param name="buffer">The buffer to cache.</param>
+ /// <param name="attribute">The buffer attribute.</param>
+ /// <returns>A handle that can be used to retrieve the buffer at a later time.</returns>
public CacheHandle RegisterCache(Buffer buffer, BufferAttribute attribute) =>
DoRegisterCache(buffer, attribute);
+ /// <summary>
+ /// Attempts to acquire a cached <see cref="Buffer"/>.
+ /// If the buffer was evicted from the cache, a null buffer is returned.
+ /// </summary>
+ /// <param name="handle">The handle received when registering the buffer.</param>
+ /// <returns>The requested <see cref="Buffer"/> if it's still in the cache;
+ /// otherwise a null <see cref="Buffer"/>.</returns>
public Buffer AcquireCache(CacheHandle handle) => DoAcquireCache(handle);
+
+ /// <summary>
+ /// Gets the total size of the <see cref="IBufferManager"/>'s heap.
+ /// </summary>
+ /// <returns>The total size of the heap.</returns>
public int GetTotalSize() => DoGetTotalSize();
+
+ /// <summary>
+ /// Gets the amount of free space in the heap that is not currently allocated or cached.
+ /// </summary>
+ /// <returns>The amount of free space.</returns>
public int GetFreeSize() => DoGetFreeSize();
+
+ /// <summary>
+ /// Gets the amount of space that can be used for new allocations.
+ /// This includes free space and space used by cached buffers.
+ /// </summary>
+ /// <returns>The amount of allocatable space.</returns>
public int GetTotalAllocatableSize() => DoGetTotalAllocatableSize();
+
+ /// <summary>
+ /// Gets the largest amount of free space there's been at one time since the peak was last cleared.
+ /// </summary>
+ /// <returns>The peak amount of free space.</returns>
public int GetFreeSizePeak() => DoGetFreeSizePeak();
+
+ /// <summary>
+ /// Gets the largest amount of allocatable space there's been at one time since the peak was last cleared.
+ /// </summary>
+ /// <returns>The peak amount of allocatable space.</returns>
public int GetTotalAllocatableSizePeak() => DoGetTotalAllocatableSizePeak();
+
+ /// <summary>
+ /// Gets the number of times an allocation or cache registration needed to be retried after deallocating
+ /// a cache entry because of insufficient heap space or cache space.
+ /// </summary>
+ /// <returns>The number of retries.</returns>
public int GetRetriedCount() => DoGetRetriedCount();
+
+ /// <summary>
+ /// Resets the free and allocatable peak sizes, setting the peak sizes to the actual current sizes.
+ /// </summary>
public void ClearPeak() => DoClearPeak();
protected abstract Buffer DoAllocateBuffer(int size, BufferAttribute attribute);
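
A sketch of the allocate/register/acquire flow that the new documentation describes (illustrative, not part of the patch; 'manager' and the 16 KiB size are hypothetical):

    Buffer buffer = manager.AllocateBuffer(0x4000);
    if (buffer.IsNull)
        return ResultFs.BufferAllocationFailed.Log();

    // ... fill the buffer ...

    // After registering, the buffer belongs to the cache and must not be touched.
    CacheHandle handle = manager.RegisterCache(buffer, new IBufferManager.BufferAttribute());

    // Later: the entry may have been evicted to satisfy other allocations.
    Buffer cached = manager.AcquireCache(handle);
    if (cached.IsNull)
    {
        // Evicted; re-read or regenerate the data, then allocate again.
    }
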
diff --git a/src/LibHac/Fs/ResultFs.cs b/src/LibHac/Fs/ResultFs.cs
index 04baa397..d8d6ae18 100644
--- a/src/LibHac/Fs/ResultFs.cs
+++ b/src/LibHac/Fs/ResultFs.cs
@@ -136,6 +136,8 @@ namespace LibHac.Fs
public static Result.Base AllocationFailureInEncryptedFileSystemCreatorA => new Result.Base(ModuleFs, 3394);
/// <summary>In OpenFile or OpenDirectory<br/>Error code: 2002-3407; Inner value: 0x1a9e02</summary>
public static Result.Base AllocationFailureInFileSystemInterfaceAdapter => new Result.Base(ModuleFs, 3407);
+ /// <summary>In Initialize allocating Cache array<br/>Error code: 2002-3411; Inner value: 0x1aa602</summary>
+ public static Result.Base AllocationFailureInBufferedStorageA => new Result.Base(ModuleFs, 3411);
/// Error code: 2002-3420; Inner value: 0x1ab802
public static Result.Base AllocationFailureInNew => new Result.Base(ModuleFs, 3420);
/// Error code: 2002-3421; Inner value: 0x1aba02
diff --git a/src/LibHac/FsSystem/Buffers/BufferManagerUtility.cs b/src/LibHac/FsSystem/Buffers/BufferManagerUtility.cs
new file mode 100644
index 00000000..fd256106
--- /dev/null
+++ b/src/LibHac/FsSystem/Buffers/BufferManagerUtility.cs
@@ -0,0 +1,143 @@
+using System;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using LibHac.Diag;
+using LibHac.Fs;
+using Buffer = LibHac.Fs.Buffer;
+
+namespace LibHac.FsSystem.Buffers
+{
+ public struct BufferManagerContext
+ {
+ private bool _needsBlocking;
+
+ public bool IsNeedBlocking() => _needsBlocking;
+ public void SetNeedBlocking(bool needsBlocking) => _needsBlocking = needsBlocking;
+ }
+
+ public struct ScopedBufferManagerContextRegistration : IDisposable
+ {
+ private BufferManagerContext _oldContext;
+
+ // ReSharper disable once UnusedParameter.Local
+ public ScopedBufferManagerContextRegistration(int unused = default)
+ {
+ _oldContext = BufferManagerUtility.GetBufferManagerContext();
+ }
+
+ public void Dispose()
+ {
+ BufferManagerUtility.RegisterBufferManagerContext(in _oldContext);
+ }
+ }
+
+ internal static class BufferManagerUtility
+ {
+ // Todo: Use TimeSpan
+ private const int RetryWait = 10;
+
+ [ThreadStatic]
+ private static BufferManagerContext _context;
+
+ public delegate bool IsValidBufferFunction(in Buffer buffer);
+
+ public static Result DoContinuouslyUntilBufferIsAllocated(Func<Result> function, Func<Result> onFailure,
+ [CallerMemberName] string callerName = "")
+ {
+ const int bufferAllocationRetryLogCountMax = 10;
+ const int bufferAllocationRetryLogInterval = 100;
+
+ Result result;
+
+ for (int count = 1; ; count++)
+ {
+ result = function();
+ if (!ResultFs.BufferAllocationFailed.Includes(result))
+ break;
+
+ // Failed to allocate. Wait and try again.
+ if (1 <= count && count <= bufferAllocationRetryLogCountMax ||
+ count % bufferAllocationRetryLogInterval == 0)
+ {
+ // Todo: Log allocation failure
+ }
+
+ Result rc = onFailure();
+ if (rc.IsFailure()) return rc;
+
+ Thread.Sleep(RetryWait);
+ }
+
+ return result;
+ }
+
+ public static Result DoContinuouslyUntilBufferIsAllocated(Func<Result> function,
+ [CallerMemberName] string callerName = "")
+ {
+ return DoContinuouslyUntilBufferIsAllocated(function, static () => Result.Success, callerName);
+ }
+
+ public static void RegisterBufferManagerContext(in BufferManagerContext context)
+ {
+ _context = context;
+ }
+
+ public static ref BufferManagerContext GetBufferManagerContext() => ref _context;
+
+ public static void EnableBlockingBufferManagerAllocation()
+ {
+ ref BufferManagerContext context = ref GetBufferManagerContext();
+ context.SetNeedBlocking(true);
+ }
+
+ public static Result AllocateBufferUsingBufferManagerContext(out Buffer outBuffer, IBufferManager bufferManager,
+ int size, IBufferManager.BufferAttribute attribute, IsValidBufferFunction isValidBuffer,
+ [CallerMemberName] string callerName = "")
+ {
+ Assert.NotNull(bufferManager);
+ Assert.NotNull(callerName);
+
+ // Clear the output.
+ outBuffer = default;
+ Buffer tempBuffer = default;
+
+ // Get the context.
+ ref BufferManagerContext context = ref GetBufferManagerContext();
+
+ Result AllocateBufferImpl()
+ {
+ Buffer buffer = bufferManager.AllocateBuffer(size, attribute);
+
+ if (!isValidBuffer(in buffer))
+ {
+ if (!buffer.IsNull)
+ {
+ bufferManager.DeallocateBuffer(buffer);
+ }
+
+ return ResultFs.BufferAllocationFailed.Log();
+ }
+
+ tempBuffer = buffer;
+ return Result.Success;
+ }
+
+ if (!context.IsNeedBlocking())
+ {
+ // If we don't need to block, just allocate the buffer.
+ Result rc = AllocateBufferImpl();
+ if (rc.IsFailure()) return rc;
+ }
+ else
+ {
+ // Otherwise, try to allocate repeatedly.
+ Result rc = DoContinuouslyUntilBufferIsAllocated(AllocateBufferImpl);
+ if (rc.IsFailure()) return rc;
+ }
+
+ Assert.True(!tempBuffer.IsNull);
+ outBuffer = tempBuffer;
+ return Result.Success;
+ }
+ }
+}
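
A sketch of how a caller opts into the blocking allocation path added here (illustrative, not part of the patch; 'bufferManager' and 'blockSize' are hypothetical). It mirrors how BufferedStorage uses these helpers later in this patch.

    // Mark the current thread's context as allowed to block (retry) on allocation failure.
    BufferManagerUtility.EnableBlockingBufferManagerAllocation();

    // The validity callback rejects null buffers; with blocking enabled, the helper retries
    // with a short sleep until the allocation succeeds or onFailure reports an error.
    Result rc = BufferManagerUtility.AllocateBufferUsingBufferManagerContext(out Buffer buffer,
        bufferManager, blockSize, new IBufferManager.BufferAttribute(),
        static (in Buffer candidate) => !candidate.IsNull);
    if (rc.IsFailure()) return rc;

    // Use the buffer, then return it to the manager.
    bufferManager.DeallocateBuffer(buffer);
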
diff --git a/src/LibHac/FsSystem/Buffers/FileSystemBuddyHeap.cs b/src/LibHac/FsSystem/Buffers/FileSystemBuddyHeap.cs
index 92ef224b..9b97fef4 100644
--- a/src/LibHac/FsSystem/Buffers/FileSystemBuddyHeap.cs
+++ b/src/LibHac/FsSystem/Buffers/FileSystemBuddyHeap.cs
@@ -199,13 +199,15 @@ namespace LibHac.FsSystem
public Result Initialize(UIntPtr address, nuint size, nuint blockSize, int orderMax, void* workBuffer,
nuint workBufferSize)
{
- // Note: Buffer size assert is done before adjusting for alignment
Assert.True(workBufferSize >= QueryWorkBufferSize(orderMax));
uint pageListAlignment = (uint)Unsafe.SizeOf();
var alignedWork = (void*)Alignment.AlignUpPow2((ulong)workBuffer, pageListAlignment);
ExternalFreeLists = (PageList*)alignedWork;
+ // Note: The original code does not have a buffer size assert after adjusting for alignment.
+ Assert.True(workBufferSize - ((nuint)alignedWork - (nuint)workBuffer) >= QueryWorkBufferSize(orderMax));
+
return Initialize(address, size, blockSize, orderMax);
}
@@ -264,7 +266,7 @@ namespace LibHac.FsSystem
// Allocate remaining space to smaller orders as possible.
{
nuint remaining = HeapSize - (maxPageCount - 1) * maxPageSize;
- nuint curAddress = (nuint)HeapStart - (maxPageCount - 1) * maxPageSize;
+ nuint curAddress = HeapStart - (maxPageCount - 1) * maxPageSize;
Assert.True(Alignment.IsAlignedPow2(remaining, (uint)BlockSize));
do
@@ -572,7 +574,6 @@ namespace LibHac.FsSystem
private MemoryHandle PinnedHeapMemoryHandle { get; set; }
private Memory<byte> HeapBuffer { get; set; }
private MemoryHandle PinnedWorkMemoryHandle { get; set; }
- private Memory<byte> WorkBuffer { get; set; }
public Result Initialize(Memory<byte> heapBuffer, int blockSize, Memory<byte> workBuffer)
{
@@ -583,7 +584,6 @@ namespace LibHac.FsSystem
public Result Initialize(Memory<byte> heapBuffer, int blockSize, int orderMax, Memory<byte> workBuffer)
{
PinnedWorkMemoryHandle = workBuffer.Pin();
- WorkBuffer = workBuffer;
PinnedHeapMemoryHandle = heapBuffer.Pin();
HeapBuffer = heapBuffer;
@@ -591,8 +591,8 @@ namespace LibHac.FsSystem
var heapAddress = (UIntPtr)PinnedHeapMemoryHandle.Pointer;
var heapSize = (nuint)heapBuffer.Length;
- void* workAddress = PinnedHeapMemoryHandle.Pointer;
- var workSize = (nuint)heapBuffer.Length;
+ void* workAddress = PinnedWorkMemoryHandle.Pointer;
+ var workSize = (nuint)workBuffer.Length;
return Initialize(heapAddress, heapSize, (nuint)blockSize, orderMax, workAddress, workSize);
}
diff --git a/src/LibHac/FsSystem/Buffers/FileSystemBufferManager.cs b/src/LibHac/FsSystem/Buffers/FileSystemBufferManager.cs
index 6ee26f73..ba20f4a7 100644
--- a/src/LibHac/FsSystem/Buffers/FileSystemBufferManager.cs
+++ b/src/LibHac/FsSystem/Buffers/FileSystemBufferManager.cs
@@ -90,7 +90,7 @@ namespace LibHac.FsSystem
// Validate pre-conditions.
Assert.True(Entries == null);
- // Note: We don't have the option of using an external Entry buffer like the original
+ // Note: We don't have the option of using an external Entry buffer like the original C++ code
// because Entry includes managed references so we can't cast a byte* to Entry* without pinning.
// If we don't have an external buffer, try to allocate an internal one.
@@ -215,6 +215,7 @@ namespace LibHac.FsSystem
if (CanUnregister(this, ref Entries[i]))
{
entry = ref Entries[i];
+ break;
}
}
@@ -278,7 +279,7 @@ namespace LibHac.FsSystem
entry = ref Entries[EntryCount];
entry.Initialize(PublishCacheHandle(), buffer, attr);
EntryCount++;
- Assert.True(EntryCount == 1 || Entries[EntryCount - 1].GetHandle() < entry.GetHandle());
+ Assert.True(EntryCount == 1 || Entries[EntryCount - 2].GetHandle() < entry.GetHandle());
}
return ref entry;
@@ -292,7 +293,7 @@ namespace LibHac.FsSystem
// Ensure the entry is valid.
Span<Entry> entryBuffer = Entries;
- Assert.True(Unsafe.IsAddressGreaterThan(ref entry, ref MemoryMarshal.GetReference(entryBuffer)));
+ Assert.True(!Unsafe.IsAddressLessThan(ref entry, ref MemoryMarshal.GetReference(entryBuffer)));
Assert.True(Unsafe.IsAddressLessThan(ref entry,
ref Unsafe.Add(ref MemoryMarshal.GetReference(entryBuffer), entryBuffer.Length)));
@@ -347,8 +348,9 @@ namespace LibHac.FsSystem
public Result Initialize(int maxCacheCount, Memory<byte> heapBuffer, int blockSize, Memory<byte> workBuffer)
{
- // Note: We can't use an external buffer for the cache handle table,
+ // Note: We can't use an external buffer for the cache handle table since it contains managed pointers,
// so pass the work buffer directly to the buddy heap.
+
Result rc = CacheTable.Initialize(maxCacheCount);
if (rc.IsFailure()) return rc;
diff --git a/src/LibHac/FsSystem/PooledBuffer.cs b/src/LibHac/FsSystem/PooledBuffer.cs
new file mode 100644
index 00000000..e2d84bd9
--- /dev/null
+++ b/src/LibHac/FsSystem/PooledBuffer.cs
@@ -0,0 +1,117 @@
+using System;
+using System.Buffers;
+using LibHac.Diag;
+
+namespace LibHac.FsSystem
+{
+ // Implement the PooledBuffer interface using .NET ArrayPools
+ public struct PooledBuffer : IDisposable
+ {
+ // It's faster to allocate new, small arrays than to rent them from the pool.
+ private const int RentThresholdBytes = 512;
+
+ private const int HeapBlockSize = 1024 * 4;
+
+ // Keep the max sizes that FS uses.
+ // A heap block is 4 KB. An order is a power of two.
+ // This gives maximum block sizes of 512 KB (order 7) and 4 MB (order 10).
+ private const int HeapOrderMax = 7;
+ private const int HeapOrderMaxForLarge = HeapOrderMax + 3;
+
+ private const int HeapAllocatableSizeMax = HeapBlockSize * (1 << HeapOrderMax);
+ private const int HeapAllocatableSizeMaxForLarge = HeapBlockSize * (1 << HeapOrderMaxForLarge);
+
+ private byte[] Array { get; set; }
+ private int Length { get; set; }
+
+ public PooledBuffer(int idealSize, int requiredSize)
+ {
+ Array = null;
+ Length = default;
+ Allocate(idealSize, requiredSize);
+ }
+
+ public Span<byte> GetBuffer()
+ {
+ Assert.NotNull(Array);
+ return Array.AsSpan(0, Length);
+ }
+
+ public int GetSize()
+ {
+ Assert.NotNull(Array);
+ return Length;
+ }
+
+ public static int GetAllocatableSizeMax() => GetAllocatableSizeMaxCore(false);
+ public static int GetAllocatableParticularlyLargeSizeMax() => GetAllocatableSizeMaxCore(true);
+
+ private static int GetAllocatableSizeMaxCore(bool enableLargeCapacity)
+ {
+ return enableLargeCapacity ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
+ }
+
+ public void Allocate(int idealSize, int requiredSize) => AllocateCore(idealSize, requiredSize, false);
+ public void AllocateParticularlyLarge(int idealSize, int requiredSize) => AllocateCore(idealSize, requiredSize, true);
+
+ private void AllocateCore(int idealSize, int requiredSize, bool enableLargeCapacity)
+ {
+ Assert.Null(Array);
+
+ // Check that we can allocate this size.
+ Assert.True(requiredSize <= GetAllocatableSizeMaxCore(enableLargeCapacity));
+
+ int targetSize = Math.Min(Math.Max(idealSize, requiredSize),
+ GetAllocatableSizeMaxCore(enableLargeCapacity));
+
+ if (targetSize >= RentThresholdBytes)
+ {
+ Array = ArrayPool<byte>.Shared.Rent(targetSize);
+ }
+ else
+ {
+ Array = new byte[targetSize];
+ }
+
+ Length = Array.Length;
+ }
+
+ public void Deallocate()
+ {
+ // Shrink the buffer to empty.
+ Shrink(0);
+ Assert.Null(Array);
+ }
+
+ public void Shrink(int idealSize)
+ {
+ Assert.True(idealSize <= GetAllocatableSizeMaxCore(true));
+
+ // Check if we actually need to shrink.
+ if (Length > idealSize)
+ {
+ Assert.NotNull(Array);
+
+ // Pretend we shrank the buffer.
+ Length = idealSize;
+
+ // Shrinking to zero means that we have no buffer.
+ if (Length == 0)
+ {
+ // Return the array if we rented it.
+ if (Array?.Length >= RentThresholdBytes)
+ {
+ ArrayPool<byte>.Shared.Return(Array);
+ }
+
+ Array = null;
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ Deallocate();
+ }
+ }
+}
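
A usage sketch for PooledBuffer (illustrative, not part of the patch; 'baseStorage', 'offset', and the sizes are hypothetical). Disposal shrinks the buffer to zero and returns rented arrays to the shared pool.

    // The allocated size is max(idealSize, requiredSize), capped at the allocatable maximum.
    using var pooledBuffer = new PooledBuffer(idealSize: 0x100000, requiredSize: 0x4000);

    Span<byte> work = pooledBuffer.GetBuffer();
    Result rc = baseStorage.Read(offset, work);
    if (rc.IsFailure()) return rc;
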
diff --git a/src/LibHac/FsSystem/Save/BufferedStorage.cs b/src/LibHac/FsSystem/Save/BufferedStorage.cs
new file mode 100644
index 00000000..29af5b84
--- /dev/null
+++ b/src/LibHac/FsSystem/Save/BufferedStorage.cs
@@ -0,0 +1,1716 @@
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Threading;
+using LibHac.Common;
+using LibHac.Diag;
+using LibHac.Fs;
+using LibHac.FsSystem.Buffers;
+using LibHac.Util;
+using Buffer = LibHac.Fs.Buffer;
+using CacheHandle = System.Int64;
+
+namespace LibHac.FsSystem.Save
+{
+ /// <summary>
+ /// An <see cref="IStorage"/> that provides buffered access to a base <see cref="IStorage"/>.
+ /// </summary>
+ public class BufferedStorage : IStorage
+ {
+ private const long InvalidOffset = long.MaxValue;
+ private const int InvalidIndex = -1;
+
+ /// <summary>
+ /// Caches a single block of data for a <see cref="BufferedStorage"/>.
+ /// </summary>
+ private struct Cache : IDisposable
+ {
+ private ref struct FetchParameter
+ {
+ public long Offset;
+ public Span<byte> Buffer;
+ }
+
+ private BufferedStorage BufferedStorage { get; set; }
+ private Buffer MemoryRange { get; set; }
+ private CacheHandle CacheHandle { get; set; }
+ private long Offset { get; set; }
+ private bool _isValid;
+ private bool _isDirty;
+ private int ReferenceCount { get; set; }
+ private int Index { get; set; }
+ private int NextIndex { get; set; }
+ private int PrevIndex { get; set; }
+
+ private ref Cache Next => ref BufferedStorage.Caches[NextIndex];
+ private ref Cache Prev => ref BufferedStorage.Caches[PrevIndex];
+
+ public void Dispose()
+ {
+ FinalizeObject();
+ }
+
+ public void Initialize(BufferedStorage bufferedStorage, int index)
+ {
+ // Note: C# can't have default constructors on structs, so the default constructor code was
+ // moved into Initialize since Initialize is always called right after the constructor.
+ Offset = InvalidOffset;
+ ReferenceCount = 1;
+ Index = index;
+ NextIndex = InvalidIndex;
+ PrevIndex = InvalidIndex;
+ // End default constructor code
+
+ Assert.NotNull(bufferedStorage);
+ Assert.True(BufferedStorage == null);
+
+ BufferedStorage = bufferedStorage;
+ Link();
+ }
+
+ public void FinalizeObject()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.Equal(0, ReferenceCount);
+
+ // If we're valid, acquire our cache handle and free our buffer.
+ if (IsValid())
+ {
+ IBufferManager bufferManager = BufferedStorage.BufferManager;
+ if (!_isDirty)
+ {
+ Assert.True(MemoryRange.IsNull);
+ MemoryRange = bufferManager.AcquireCache(CacheHandle);
+ }
+
+ if (!MemoryRange.IsNull)
+ {
+ bufferManager.DeallocateBuffer(MemoryRange);
+ MemoryRange = Buffer.Empty;
+ }
+ }
+
+ // Clear all our members.
+ BufferedStorage = null;
+ Offset = InvalidOffset;
+ _isValid = false;
+ _isDirty = false;
+ NextIndex = InvalidIndex;
+ PrevIndex = InvalidIndex;
+ }
+
+ /// <summary>
+ /// Decrements the ref-count and adds the <see cref="Cache"/> to its <see cref="BufferedStorage"/>'s
+ /// fetch list if the <see cref="Cache"/> has no more references, registering the buffer with
+ /// the <see cref="IBufferManager"/> if it is not dirty.
+ /// </summary>
+ public void Link()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(ReferenceCount > 0);
+
+ ReferenceCount--;
+ if (ReferenceCount == 0)
+ {
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+
+ // If the fetch list is empty we can simply add it as the only cache in the list.
+ if (BufferedStorage.NextFetchCacheIndex == InvalidIndex)
+ {
+ BufferedStorage.NextFetchCacheIndex = Index;
+ NextIndex = Index;
+ PrevIndex = Index;
+ }
+ else
+ {
+ // Check against a cache being registered twice.
+ ref Cache cache = ref BufferedStorage.NextFetchCache;
+ do
+ {
+ if (cache.IsValid() && Hits(cache.Offset, BufferedStorage.BlockSize))
+ {
+ _isValid = false;
+ break;
+ }
+
+ cache = ref cache.Next;
+ } while (cache.Index != BufferedStorage.NextFetchCacheIndex);
+
+ // Verify the end of the fetch list loops back to the start.
+ Assert.True(BufferedStorage.NextFetchCache.PrevIndex != InvalidIndex);
+ Assert.Equal(BufferedStorage.NextFetchCache.Prev.NextIndex,
+ BufferedStorage.NextFetchCacheIndex);
+
+ // Link into the fetch list.
+ NextIndex = BufferedStorage.NextFetchCacheIndex;
+ PrevIndex = BufferedStorage.NextFetchCache.PrevIndex;
+ Next.PrevIndex = Index;
+ Prev.NextIndex = Index;
+
+ // Insert invalid caches at the start of the list so they'll
+ // be used first when a fetch cache is needed.
+ if (!IsValid())
+ BufferedStorage.NextFetchCacheIndex = Index;
+ }
+
+ // If we're not valid, clear our offset.
+ if (!IsValid())
+ {
+ Offset = InvalidOffset;
+ _isDirty = false;
+ }
+
+ // Ensure our buffer state is coherent.
+ // We can let go of our buffer if it's not dirty, allowing the buffer to be used elsewhere if needed.
+ if (!MemoryRange.IsNull && !IsDirty())
+ {
+ // If we're valid, register the buffer with the buffer manager for possible later retrieval.
+ // Otherwise the data in the buffer isn't needed, so deallocate it.
+ if (IsValid())
+ {
+ CacheHandle = BufferedStorage.BufferManager.RegisterCache(MemoryRange,
+ new IBufferManager.BufferAttribute());
+ }
+ else
+ {
+ BufferedStorage.BufferManager.DeallocateBuffer(MemoryRange);
+ }
+
+ MemoryRange = default;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Increments the ref-count and removes the <see cref="Cache"/> from its
+ /// <see cref="BufferedStorage"/>'s fetch list if needed.
+ /// </summary>
+ public void Unlink()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.True(ReferenceCount >= 0);
+
+ ReferenceCount++;
+ if (ReferenceCount == 1)
+ {
+ // If we're the first to grab this Cache, the Cache should be in the BufferedStorage's fetch list.
+ Assert.True(NextIndex != InvalidIndex);
+ Assert.True(PrevIndex != InvalidIndex);
+ Assert.True(Next.PrevIndex == Index);
+ Assert.True(Prev.NextIndex == Index);
+
+ // Set the new fetch list head if this Cache is the current head
+ if (BufferedStorage.NextFetchCacheIndex == Index)
+ {
+ if (NextIndex != Index)
+ {
+ BufferedStorage.NextFetchCacheIndex = NextIndex;
+ }
+ else
+ {
+ BufferedStorage.NextFetchCacheIndex = InvalidIndex;
+ }
+ }
+
+ BufferedStorage.NextAcquireCacheIndex = Index;
+
+ Next.PrevIndex = PrevIndex;
+ Prev.NextIndex = NextIndex;
+ NextIndex = InvalidIndex;
+ PrevIndex = InvalidIndex;
+ }
+ else
+ {
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ }
+ }
+
+ ///
+ /// Reads the data from the base contained in this 's buffer.
+ /// The must contain valid data before calling, and the
+ /// must be inside the block of data held by this .
+ ///
+ /// The offset in the base to be read from.
+ /// The buffer in which to place the read data.
+ public void Read(long offset, Span<byte> buffer)
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(IsValid());
+ Assert.True(Hits(offset, 1));
+ Assert.True(!MemoryRange.IsNull);
+
+ long readOffset = offset - Offset;
+ long readableOffsetMax = BufferedStorage.BlockSize - buffer.Length;
+
+ Assert.True(readOffset >= 0);
+ Assert.True(readOffset <= readableOffsetMax);
+
+ Span<byte> cacheBuffer = MemoryRange.Span.Slice((int)readOffset, buffer.Length);
+ cacheBuffer.CopyTo(buffer);
+ }
+
+ ///
+ /// Buffers data to be written to the base when this is flushed.
+ /// The must contain valid data before calling, and the
+ /// must be inside the block of data held by this .
+ ///
+ /// The offset in the base to be written to.
+ /// The buffer containing the data to be written.
+ public void Write(long offset, ReadOnlySpan<byte> buffer)
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(IsValid());
+ Assert.True(Hits(offset, 1));
+ Assert.True(!MemoryRange.IsNull);
+
+ long writeOffset = offset - Offset;
+ long writableOffsetMax = BufferedStorage.BlockSize - buffer.Length;
+
+ Assert.True(writeOffset >= 0);
+ Assert.True(writeOffset <= writableOffsetMax);
+
+ Span<byte> cacheBuffer = MemoryRange.Span.Slice((int)writeOffset, buffer.Length);
+ buffer.CopyTo(cacheBuffer);
+ _isDirty = true;
+ }
+
+ ///
+ /// If this is dirty, flushes its data to the base .
+ /// The must contain valid data before calling.
+ ///
+ /// The of the operation.
+ public Result Flush()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(IsValid());
+
+ if (_isDirty)
+ {
+ Assert.True(!MemoryRange.IsNull);
+
+ long baseSize = BufferedStorage.BaseStorageSize;
+ long blockSize = BufferedStorage.BlockSize;
+ long flushSize = Math.Min(blockSize, baseSize - Offset);
+
+ SubStorage baseStorage = BufferedStorage.BaseStorage;
+ Span<byte> cacheBuffer = MemoryRange.Span;
+ Assert.True(flushSize == cacheBuffer.Length);
+
+ Result rc = baseStorage.Write(Offset, cacheBuffer);
+ if (rc.IsFailure()) return rc;
+
+ _isDirty = false;
+
+ BufferManagerUtility.EnableBlockingBufferManagerAllocation();
+ }
+
+ return Result.Success;
+ }
+
+ ///
+ /// Prepares this to fetch a new block from the base .
+ /// If the caller has the only reference to this Cache,
+ /// the Cache's buffer will be flushed and the Cache invalidated. While the Cache is
+ /// prepared to fetch, will skip it when iterating all the Caches.
+ ///
+ /// The of any attempted flush, and if the
+ /// is prepared to fetch; if not.
+ public (Result Result, bool IsPrepared) PrepareFetch()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(IsValid());
+ Assert.True(Monitor.IsEntered(BufferedStorage.Locker));
+
+ (Result Result, bool IsPrepared) result = (Result.Success, false);
+
+ if (ReferenceCount == 1)
+ {
+ result.Result = Flush();
+
+ if (result.Result.IsSuccess())
+ {
+ _isValid = false;
+ ReferenceCount = 0;
+ result.IsPrepared = true;
+ }
+ }
+
+ return result;
+ }
+
+ ///
+ /// Marks the as unprepared to cache a new block,
+ /// allowing to acquire it while iterating.
+ ///
+ public void UnprepareFetch()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(!IsValid());
+ Assert.True(!_isDirty);
+ Assert.True(Monitor.IsEntered(BufferedStorage.Locker));
+
+ _isValid = true;
+ ReferenceCount = 1;
+ }
+
+ ///
+ /// Reads the storage block containing the specified offset into this 's buffer.
+ ///
+ /// An offset in the block to fetch.
+ /// : The operation was successful.
+ /// : A buffer could not be allocated.
+ public Result Fetch(long offset)
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(!IsValid());
+ Assert.True(!_isDirty);
+
+ Result rc;
+
+ // Make sure this Cache has an allocated buffer
+ if (MemoryRange.IsNull)
+ {
+ rc = AllocateFetchBuffer();
+ if (rc.IsFailure()) return rc;
+ }
+
+ CalcFetchParameter(out FetchParameter fetchParam, offset);
+
+ rc = BufferedStorage.BaseStorage.Read(fetchParam.Offset, fetchParam.Buffer);
+ if (rc.IsFailure()) return rc;
+
+ Offset = fetchParam.Offset;
+ Assert.True(Hits(offset, 1));
+
+ return Result.Success;
+ }
+
+ ///
+ /// Fills this 's buffer from an input buffer containing a block of data
+ /// read from the base .
+ ///
+ /// The start offset of the block in the base
+ /// that the data was read from.
+ /// A buffer containing the data read from the base .
+ /// : The operation was successful.
+ /// : A buffer could not be allocated.
+ public Result FetchFromBuffer(long offset, ReadOnlySpan<byte> buffer)
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(NextIndex == InvalidIndex);
+ Assert.True(PrevIndex == InvalidIndex);
+ Assert.True(!IsValid());
+ Assert.True(!_isDirty);
+ Assert.True(Alignment.IsAlignedPow2(offset, (uint)BufferedStorage.BlockSize));
+
+ // Make sure this Cache has an allocated buffer
+ if (MemoryRange.IsNull)
+ {
+ Result rc = AllocateFetchBuffer();
+ if (rc.IsFailure()) return rc;
+ }
+
+ CalcFetchParameter(out FetchParameter fetchParam, offset);
+ Assert.Equal(fetchParam.Offset, offset);
+ Assert.True(fetchParam.Buffer.Length <= buffer.Length);
+
+ buffer.Slice(0, fetchParam.Buffer.Length).CopyTo(fetchParam.Buffer);
+ Offset = fetchParam.Offset;
+ Assert.True(Hits(offset, 1));
+
+ return Result.Success;
+ }
+
+ ///
+ /// Tries to retrieve the cache's memory buffer from the .
+ ///
+ /// if the memory buffer was available.
+ /// if the buffer has been evicted from the cache.
+ public bool TryAcquireCache()
+ {
+ Assert.NotNull(BufferedStorage);
+ Assert.NotNull(BufferedStorage.BufferManager);
+ Assert.True(IsValid());
+
+ if (!MemoryRange.IsNull)
+ return true;
+
+ MemoryRange = BufferedStorage.BufferManager.AcquireCache(CacheHandle);
+ _isValid = !MemoryRange.IsNull;
+ return _isValid;
+ }
+
+ ///
+ /// Invalidates the data in this .
+ ///
+ public void Invalidate()
+ {
+ Assert.NotNull(BufferedStorage);
+ _isValid = false;
+ }
+
+ ///
+ /// Does this have a valid buffer or are there any references to this Cache?
+ ///
+ /// if this has a valid buffer
+ /// or if anybody currently has a reference to this Cache. Otherwise, .
+ public bool IsValid()
+ {
+ Assert.NotNull(BufferedStorage);
+
+ return _isValid || ReferenceCount > 0;
+ }
+
+ ///
+ /// Does this have modified data that needs
+ /// to be flushed to the base ?
+ ///
+ /// if this has unflushed data.
+ /// Otherwise, .
+ public bool IsDirty()
+ {
+ Assert.NotNull(BufferedStorage);
+
+ return _isDirty;
+ }
+
+ ///
+ /// Checks if the covers any of the specified range.
+ ///
+ /// The start offset of the range to check.
+ /// The size of the range to check.
+ /// if this 's range covers any of the input range.
+ /// Otherwise, .
+ public bool Hits(long offset, long size)
+ {
+ Assert.NotNull(BufferedStorage);
+
+ long blockSize = BufferedStorage.BlockSize;
+ return (offset < Offset + blockSize) && (Offset < offset + size);
+ }
+
+ ///
+ /// Allocates a buffer for this .
+ /// Should only be called when there is not already an allocated buffer.
+ ///
+ /// : The operation was successful.
+ /// : A buffer could not be allocated.
+ private Result AllocateFetchBuffer()
+ {
+ IBufferManager bufferManager = BufferedStorage.BufferManager;
+ Assert.True(bufferManager.AcquireCache(CacheHandle).IsNull);
+
+ Result rc = BufferManagerUtility.AllocateBufferUsingBufferManagerContext(out Buffer bufferTemp,
+ bufferManager, (int)BufferedStorage.BlockSize, new IBufferManager.BufferAttribute(),
+ static (in Buffer buffer) => !buffer.IsNull);
+
+ // Clear the current MemoryRange if allocation failed.
+ MemoryRange = rc.IsSuccess() ? bufferTemp : default;
+ return rc;
+ }
+
+ ///
+ /// Calculates the parameters used to fetch the block containing the
+ /// specified offset in the base .
+ ///
+ /// When this function returns, contains
+ /// the parameters that can be used to fetch the block.
+ /// The offset to be fetched.
+ private void CalcFetchParameter(out FetchParameter fetchParam, long offset)
+ {
+ long blockSize = BufferedStorage.BlockSize;
+ long storageOffset = Alignment.AlignDownPow2(offset, (uint)BufferedStorage.BlockSize);
+ long baseSize = BufferedStorage.BaseStorageSize;
+ long remainingSize = baseSize - storageOffset;
+ long cacheSize = Math.Min(blockSize, remainingSize);
+ Span<byte> cacheBuffer = MemoryRange.Span.Slice(0, (int)cacheSize);
+
+ Assert.True(offset >= 0);
+ Assert.True(offset < baseSize);
+
+ fetchParam = new FetchParameter
+ {
+ Offset = storageOffset,
+ Buffer = cacheBuffer
+ };
+ }
+ }
+
+ /// <summary>
+ /// Allows iteration over the <see cref="Cache"/> entries in a <see cref="BufferedStorage"/>.
+ /// Several options exist for which Caches to iterate.
+ /// </summary>
+ private ref struct SharedCache
+ {
+ // ReSharper disable once MemberHidesStaticFromOuterClass
+ public Ref<Cache> Cache { get; private set; }
+ private Ref<Cache> StartCache { get; }
+ public BufferedStorage BufferedStorage { get; }
+
+ public SharedCache(BufferedStorage bufferedStorage)
+ {
+ Assert.NotNull(bufferedStorage);
+ Cache = default;
+ StartCache = new Ref<Cache>(ref bufferedStorage.NextAcquireCache);
+ BufferedStorage = bufferedStorage;
+ }
+
+ public void Dispose()
+ {
+ lock (BufferedStorage.Locker)
+ {
+ Release();
+ }
+ }
+
+ ///
+ /// Moves to the next that contains data from the specified range.
+ ///
+ /// The start offset of the range.
+ /// The size of the range.
+ /// if a from the
+ /// specified range was found. if no matching Caches exist,
+ /// or if all matching Caches have already been iterated.
+ public bool AcquireNextOverlappedCache(long offset, long size)
+ {
+ Assert.NotNull(BufferedStorage);
+
+ bool isFirst = Cache.IsNull;
+ ref Cache start = ref isFirst ? ref StartCache.Value : ref Unsafe.Add(ref Cache.Value, 1);
+
+ // Make sure the Cache instance is in-range.
+ Assert.False(Unsafe.IsAddressLessThan(ref start,
+ ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)));
+
+ Assert.False(Unsafe.IsAddressGreaterThan(ref start,
+ ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches),
+ BufferedStorage.CacheCount)));
+
+ lock (BufferedStorage.Locker)
+ {
+ Release();
+ Assert.True(Cache.IsNull);
+
+ for (ref Cache cache = ref start; ; cache = ref Unsafe.Add(ref cache, 1))
+ {
+ // Wrap to the front of the list if we've reached the end.
+ ref Cache end = ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches),
+ BufferedStorage.CacheCount);
+ if (!Unsafe.IsAddressLessThan(ref cache, ref end))
+ {
+ cache = ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches);
+ }
+
+ // Break if we've iterated all the Caches
+ if (!isFirst && Unsafe.AreSame(ref cache, ref StartCache.Value))
+ {
+ break;
+ }
+
+ if (cache.IsValid() && cache.Hits(offset, size) && cache.TryAcquireCache())
+ {
+ cache.Unlink();
+ Cache = new Ref<Cache>(ref cache);
+ return true;
+ }
+
+ isFirst = false;
+ }
+
+ Cache = default;
+ return false;
+ }
+ }
+
+ ///
+ /// Moves to the next dirty .
+ ///
+ /// if a dirty was found.
+ /// if no dirty Caches exist,
+ /// or if all dirty Caches have already been iterated.
+ public bool AcquireNextDirtyCache()
+ {
+ Assert.NotNull(BufferedStorage);
+
+ ref Cache start = ref Cache.IsNull
+ ? ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)
+ : ref Unsafe.Add(ref Cache.Value, 1);
+
+ ref Cache end = ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches),
+ BufferedStorage.CacheCount);
+
+ // Validate the range.
+ Assert.False(Unsafe.IsAddressLessThan(ref start,
+ ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)));
+
+ Assert.False(Unsafe.IsAddressGreaterThan(ref start, ref end));
+
+ Release();
+ Assert.True(Cache.IsNull);
+
+ // Find the next dirty Cache
+ for (ref Cache cache = ref start;
+ Unsafe.IsAddressLessThan(ref cache, ref end);
+ cache = ref Unsafe.Add(ref cache, 1))
+ {
+ if (cache.IsValid() && cache.IsDirty() && cache.TryAcquireCache())
+ {
+ cache.Unlink();
+ Cache = new Ref<Cache>(ref cache);
+ return true;
+ }
+ }
+
+ Cache = default;
+ return false;
+ }
+
+ ///
+ /// Moves to the next valid .
+ ///
+ /// if a valid was found.
+ /// if no valid Caches exist,
+ /// or if all valid Caches have already been iterated.
+ public bool AcquireNextValidCache()
+ {
+ Assert.NotNull(BufferedStorage);
+
+ ref Cache start = ref Cache.IsNull
+ ? ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)
+ : ref Unsafe.Add(ref Cache.Value, 1);
+
+ ref Cache end = ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches),
+ BufferedStorage.CacheCount);
+
+ // Validate the range.
+ Assert.False(Unsafe.IsAddressLessThan(ref start,
+ ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)));
+
+ Assert.False(Unsafe.IsAddressGreaterThan(ref start, ref end));
+
+ Release();
+ Assert.True(Cache.IsNull);
+
+ // Find the next valid Cache
+ for (ref Cache cache = ref start;
+ Unsafe.IsAddressLessThan(ref cache, ref end);
+ cache = ref Unsafe.Add(ref cache, 1))
+ {
+ if (cache.IsValid() && cache.TryAcquireCache())
+ {
+ cache.Unlink();
+ Cache = new Ref<Cache>(ref cache);
+ return true;
+ }
+ }
+
+ Cache = default;
+ return false;
+ }
+
+ ///
+ /// Moves to a that can be used for
+ /// fetching a new block from the base .
+ ///
+ /// if a was acquired.
+ /// Otherwise, .
+ public bool AcquireFetchableCache()
+ {
+ Assert.NotNull(BufferedStorage);
+
+ lock (BufferedStorage.Locker)
+ {
+ Release();
+ Assert.True(Cache.IsNull);
+
+ Cache = new Ref<Cache>(ref BufferedStorage.NextFetchCache);
+
+ if (!Cache.IsNull)
+ {
+ if (Cache.Value.IsValid())
+ Cache.Value.TryAcquireCache();
+
+ Cache.Value.Unlink();
+ }
+
+ return !Cache.IsNull;
+ }
+ }
+
+ ///
+ /// Reads from the current 's buffer.
+ /// The provided must be inside the block of
+ /// data held by the .
+ ///
+ /// The offset in the base to be read from.
+ /// The buffer in which to place the read data.
+ public void Read(long offset, Span buffer)
+ {
+ Assert.True(!Cache.IsNull);
+ Cache.Value.Read(offset, buffer);
+ }
+
+ ///
+ /// Buffers data to be written to the base when the current
+ /// is flushed. The provided
+ /// must be contained by the block of data held by the .
+ ///
+ /// The offset in the base to be written to.
+ /// The buffer containing the data to be written.
+ public void Write(long offset, ReadOnlySpan buffer)
+ {
+ Assert.True(!Cache.IsNull);
+ Cache.Value.Write(offset, buffer);
+ }
+
+ ///
+ /// If the current is dirty,
+ /// flushes its data to the base .
+ ///
+ /// The of the operation.
+ public Result Flush()
+ {
+ Assert.True(!Cache.IsNull);
+ return Cache.Value.Flush();
+ }
+
+ ///
+ /// Invalidates the data in the current .
+ /// Any dirty data will be discarded.
+ ///
+ public void Invalidate()
+ {
+ Assert.True(!Cache.IsNull);
+ Cache.Value.Invalidate();
+ }
+
+ ///
+ /// Checks if the current covers any of the specified range.
+ ///
+ /// The start offset of the range to check.
+ /// The size of the range to check.
+ /// if the current 's range
+ /// covers any of the input range. Otherwise, .
+ public bool Hits(long offset, long size)
+ {
+ Assert.True(!Cache.IsNull);
+ return Cache.Value.Hits(offset, size);
+ }
+
+ ///
+ /// Releases the current to return to the fetch list.
+ ///
+ private void Release()
+ {
+ if (!Cache.IsNull)
+ {
+ // Make sure the Cache instance is in-range.
+ Assert.False(Unsafe.IsAddressLessThan(ref Cache.Value,
+ ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches)));
+
+ Assert.False(Unsafe.IsAddressGreaterThan(ref Cache.Value,
+ ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(BufferedStorage.Caches),
+ BufferedStorage.CacheCount)));
+
+ Cache.Value.Link();
+ Cache = default;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Provides exclusive access to a <see cref="Cache"/>
+ /// entry in a <see cref="BufferedStorage"/>.
+ /// </summary>
+ private ref struct UniqueCache
+ {
+ // ReSharper disable once MemberHidesStaticFromOuterClass
+ private Ref<Cache> Cache { get; set; }
+ private BufferedStorage BufferedStorage { get; }
+
+ public UniqueCache(BufferedStorage bufferedStorage)
+ {
+ Assert.NotNull(bufferedStorage);
+ Cache = default;
+ BufferedStorage = bufferedStorage;
+ }
+
+ ///
+ /// Disposes the , releasing any held .
+ ///
+ public void Dispose()
+ {
+ if (!Cache.IsNull)
+ {
+ lock (BufferedStorage.Locker)
+ {
+ Cache.Value.UnprepareFetch();
+ }
+ }
+ }
+
+ ///
+ /// Attempts to gain exclusive access to the held by
+ /// and prepare it to read a new block from the base .
+ ///
+ /// The to gain exclusive access to.
+ /// The of the operation, and if exclusive
+ /// access to the was gained; if not.
+ public (Result Result, bool wasUpgradeSuccessful) Upgrade(in SharedCache sharedCache)
+ {
+ Assert.True(BufferedStorage == sharedCache.BufferedStorage);
+ Assert.True(!sharedCache.Cache.IsNull);
+
+ lock (BufferedStorage.Locker)
+ {
+ (Result Result, bool wasUpgradeSuccessful) result = sharedCache.Cache.Value.PrepareFetch();
+
+ if (result.Result.IsSuccess() && result.wasUpgradeSuccessful)
+ Cache = sharedCache.Cache;
+
+ return result;
+ }
+ }
+
+ ///
+ /// Reads the storage block containing the specified offset into the
+ /// 's buffer, and sets the Cache to that offset.
+ ///
+ /// An offset in the block to fetch.
+ /// : The operation was successful.
+ /// : A buffer could not be allocated.
+ public Result Fetch(long offset)
+ {
+ Assert.True(!Cache.IsNull);
+
+ return Cache.Value.Fetch(offset);
+ }
+
+ ///
+ /// Fills the 's buffer from an input buffer containing a block of data
+ /// read from the base , and sets the Cache to that offset.
+ ///
+ /// The start offset of the block in the base
+ /// that the data was read from.
+ /// A buffer containing the data read from the base .
+ /// : The operation was successful.
+ /// : A buffer could not be allocated.
+ public Result FetchFromBuffer(long offset, ReadOnlySpan<byte> buffer)
+ {
+ Assert.True(!Cache.IsNull);
+
+ return Cache.Value.FetchFromBuffer(offset, buffer);
+ }
+ }
+
+ private SubStorage BaseStorage { get; set; }
+ private IBufferManager BufferManager { get; set; }
+ private long BlockSize { get; set; }
+
+ private long _baseStorageSize;
+ private long BaseStorageSize
+ {
+ get => _baseStorageSize;
+ set => _baseStorageSize = value;
+ }
+
+ private Cache[] Caches { get; set; }
+ private int CacheCount { get; set; }
+ private int NextAcquireCacheIndex { get; set; }
+ private int NextFetchCacheIndex { get; set; }
+ private object Locker { get; } = new();
+ private bool BulkReadEnabled { get; set; }
+
+ /// <summary>
+ /// The <see cref="Cache"/> at which new <see cref="SharedCache"/>s will begin iterating.
+ /// </summary>
+ private ref Cache NextAcquireCache => ref Caches[NextAcquireCacheIndex];
+
+ /// <summary>
+ /// A list of <see cref="Cache"/>s that can be used for fetching
+ /// new blocks of data from the base <see cref="IStorage"/>.
+ /// </summary>
+ private ref Cache NextFetchCache => ref Caches[NextFetchCacheIndex];
+
+ ///
+ /// Creates an uninitialized .
+ ///
+ public BufferedStorage()
+ {
+ NextAcquireCacheIndex = InvalidIndex;
+ NextFetchCacheIndex = InvalidIndex;
+ }
+
+ ///
+ /// Disposes the , flushing any cached data.
+ ///
+ ///
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ FinalizeObject();
+ }
+
+ base.Dispose(disposing);
+ }
+
+ ///
+ /// Initializes the .
+ /// Calling this method again afterwards will flush the current cache and
+ /// reinitialize the with the new parameters.
+ ///
+ /// The base storage to use.
+ /// The buffer manager used to allocate and cache memory.
+ /// The size of each cached block. Must be a power of 2.
+ /// The maximum number of blocks that can be cached at one time.
+ ///
+ public Result Initialize(SubStorage baseStorage, IBufferManager bufferManager, int blockSize, int cacheCount)
+ {
+ Assert.NotNull(baseStorage);
+ Assert.NotNull(bufferManager);
+ Assert.True(blockSize > 0);
+ Assert.True(BitUtil.IsPowerOfTwo(blockSize));
+ Assert.True(cacheCount > 0);
+
+ // Get the base storage size.
+ Result rc = baseStorage.GetSize(out _baseStorageSize);
+ if (rc.IsFailure()) return rc;
+
+ // Set members.
+ BaseStorage = baseStorage;
+ BufferManager = bufferManager;
+ BlockSize = blockSize;
+ CacheCount = cacheCount;
+
+ // Allocate the caches.
+ if (Caches != null)
+ {
+ for (int i = 0; i < Caches.Length; i++)
+ {
+ Caches[i].FinalizeObject();
+ }
+ }
+
+ Caches = new Cache[cacheCount];
+ if (Caches == null)
+ {
+ return ResultFs.AllocationFailureInBufferedStorageA.Log();
+ }
+
+ // Initialize the caches.
+ for (int i = 0; i < Caches.Length; i++)
+ {
+ Caches[i].Initialize(this, i);
+ }
+
+ NextAcquireCacheIndex = 0;
+ return Result.Success;
+ }
+
+ ///
+ /// Finalizes this , flushing all buffers and leaving it in an uninitialized state.
+ ///
+ public void FinalizeObject()
+ {
+ BaseStorage = null;
+ BaseStorageSize = 0;
+
+ foreach (Cache cache in Caches)
+ {
+ cache.Dispose();
+ }
+
+ Caches = null;
+ CacheCount = 0;
+ NextFetchCacheIndex = InvalidIndex;
+ }
+
+ ///
+ /// Has this been initialized?
+ ///
+ /// if this is initialized.
+ /// Otherwise, .
+ public bool IsInitialized() => Caches != null;
+
+ protected override Result DoRead(long offset, Span<byte> destination)
+ {
+ Assert.True(IsInitialized());
+
+ // Succeed if zero size.
+ if (destination.Length == 0)
+ return Result.Success;
+
+ // Do the read.
+ return ReadCore(offset, destination);
+ }
+
+ protected override Result DoWrite(long offset, ReadOnlySpan<byte> source)
+ {
+ Assert.True(IsInitialized());
+
+ // Succeed if zero size.
+ if (source.Length == 0)
+ return Result.Success;
+
+ // Do the write.
+ return WriteCore(offset, source);
+ }
+
+ protected override Result DoGetSize(out long size)
+ {
+ Assert.True(IsInitialized());
+
+ size = BaseStorageSize;
+ return Result.Success;
+ }
+
+ protected override Result DoSetSize(long size)
+ {
+ Assert.True(IsInitialized());
+
+ Result rc;
+ long prevSize = BaseStorageSize;
+ if (prevSize < size)
+ {
+ // Prepare to expand.
+ if (!Alignment.IsAlignedPow2(prevSize, (uint)BlockSize))
+ {
+ using var cache = new SharedCache(this);
+ long invalidateOffset = prevSize;
+ long invalidateSize = size - prevSize;
+
+ if (cache.AcquireNextOverlappedCache(invalidateOffset, invalidateSize))
+ {
+ rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+
+ cache.Invalidate();
+ }
+
+ Assert.True(!cache.AcquireNextOverlappedCache(invalidateOffset, invalidateSize));
+ }
+ }
+ else if (size < prevSize)
+ {
+ // Prepare to shrink.
+ using var cache = new SharedCache(this);
+ long invalidateOffset = size;
+ long invalidateSize = prevSize - size;
+ bool isFragment = !Alignment.IsAlignedPow2(size, (uint)BlockSize);
+
+ while (cache.AcquireNextOverlappedCache(invalidateOffset, invalidateSize))
+ {
+ if (isFragment && cache.Hits(invalidateOffset, 1))
+ {
+ rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+ }
+
+ cache.Invalidate();
+ }
+ }
+
+ // Set the size.
+ rc = BaseStorage.SetSize(size);
+ if (rc.IsFailure()) return rc;
+
+ // Get our new size.
+ rc = BaseStorage.GetSize(out long newSize);
+ if (rc.IsFailure()) return rc;
+
+ BaseStorageSize = newSize;
+ return Result.Success;
+ }
+
+ protected override Result DoOperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
+ ReadOnlySpan<byte> inBuffer)
+ {
+ Assert.True(IsInitialized());
+
+ // Invalidate caches if needed.
+ if (operationId == OperationId.InvalidateCache)
+ {
+ using var cache = new SharedCache(this);
+
+ while (cache.AcquireNextOverlappedCache(offset, size))
+ cache.Invalidate();
+ }
+
+ return BaseStorage.OperateRange(outBuffer, operationId, offset, size, inBuffer);
+ }
+
+ protected override Result DoFlush()
+ {
+ Assert.True(IsInitialized());
+
+ // Flush caches.
+ using var cache = new SharedCache(this);
+ while (cache.AcquireNextDirtyCache())
+ {
+ Result flushResult = cache.Flush();
+ if (flushResult.IsFailure()) return flushResult;
+ }
+
+ // Flush the base storage.
+ return BaseStorage.Flush();
+ }
+
+ ///
+ /// Invalidates all cached data. Any unflushed data will be discarded.
+ ///
+ public void InvalidateCaches()
+ {
+ Assert.True(IsInitialized());
+
+ using var cache = new SharedCache(this);
+ while (cache.AcquireNextValidCache())
+ cache.Invalidate();
+ }
+
+ ///
+ /// Gets the used by this .
+ ///
+ /// The buffer manager.
+ public IBufferManager GetBufferManager() => BufferManager;
+
+ public void EnableBulkRead() => BulkReadEnabled = true;
+
+ ///
+ /// Flushes the cache to the base if less than 1/8 of the
+ /// 's space can be used for allocation.
+ ///
+ /// The of the operation.
+ private Result PrepareAllocation()
+ {
+ uint flushThreshold = (uint)BufferManager.GetTotalSize() / 8;
+
+ if (BufferManager.GetTotalAllocatableSize() < flushThreshold)
+ {
+ Result rc = Flush();
+ if (rc.IsFailure()) return rc;
+ }
+
+ return Result.Success;
+ }
+
+ ///
+ /// Flushes all dirty caches if less than 25% of the space
+ /// in the is allocatable.
+ ///
+ ///
+ private Result ControlDirtiness()
+ {
+ uint flushThreshold = (uint)BufferManager.GetTotalSize() / 4;
+
+ if (BufferManager.GetTotalAllocatableSize() < flushThreshold)
+ {
+ using var cache = new SharedCache(this);
+ int dirtyCount = 0;
+
+ while (cache.AcquireNextDirtyCache())
+ {
+ if (++dirtyCount > 1)
+ {
+ Result rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+
+ cache.Invalidate();
+ }
+ }
+ }
+
+ return Result.Success;
+ }
+
+ ///
+ /// Reads data from the base into the destination buffer.
+ ///
+ /// The offset in the at which to begin reading.
+ /// The buffer where the read bytes will be stored.
+ /// The number of bytes read will be equal to the length of the buffer.
+ /// The of the operation.
+ private Result ReadCore(long offset, Span<byte> destination)
+ {
+ Assert.NotNull(Caches);
+
+ // Validate the offset.
+ long baseStorageSize = BaseStorageSize;
+ if (offset < 0 || offset > baseStorageSize)
+ return ResultFs.InvalidOffset.Log();
+
+ // Setup tracking variables.
+ long remainingSize = Math.Min(destination.Length, baseStorageSize - offset);
+ long currentOffset = offset;
+ long bufferOffset = 0;
+
+ // Try doing a bulk read if enabled.
+ //
+ // The behavior of which blocks are cached should be the same between bulk reads and non-bulk reads.
+ // If the head and tail offsets of the range to be read are not aligned to block boundaries, those
+ // head and/or tail partial blocks will end up in the cache if doing a non-bulk read.
+ //
+ // This is imitated during bulk reads by tracking if there are any partial head or tail blocks that aren't
+ // already in the cache. After the bulk read is complete these partial blocks will be added to the cache.
+ if (BulkReadEnabled)
+ {
+ // Read any blocks at the head of the range that are cached.
+ bool headCacheNeeded =
+ ReadHeadCache(ref currentOffset, destination, ref remainingSize, ref bufferOffset);
+ if (remainingSize == 0) return Result.Success;
+
+ // Read any blocks at the tail of the range that are cached.
+ bool tailCacheNeeded = ReadTailCache(currentOffset, destination, ref remainingSize, bufferOffset);
+ if (remainingSize == 0) return Result.Success;
+
+ // Perform bulk reads.
+ const long bulkReadSizeMax = 1024 * 1024 * 2; // 2 MB
+
+ if (remainingSize < bulkReadSizeMax)
+ {
+ // Try to do a bulk read.
+ Result rc = BulkRead(currentOffset, destination.Slice((int)bufferOffset, (int)remainingSize),
+ headCacheNeeded, tailCacheNeeded);
+
+ // If the read fails due to insufficient pooled buffer size,
+ // then we want to fall back to the normal read path.
+ if (!ResultFs.AllocationFailurePooledBufferNotEnoughSize.Includes(rc))
+ return rc;
+ }
+ }
+
+ // Repeatedly read until we're done.
+ while (remainingSize > 0)
+ {
+ // Determine how much to read this iteration.
+ int currentSize;
+
+ // If the offset is in the middle of a block, read the remaining part of that block.
+ if (!Alignment.IsAlignedPow2(currentOffset, (uint)BlockSize))
+ {
+ long alignedSize = BlockSize - (currentOffset & (BlockSize - 1));
+ currentSize = (int)Math.Min(alignedSize, remainingSize);
+ }
+ // If we only have a partial block left to read, read that partial block.
+ else if (remainingSize < BlockSize)
+ {
+ currentSize = (int)remainingSize;
+ }
+ // We have at least one full block to read. Read all the remaining full blocks at once.
+ else
+ {
+ currentSize = (int)Alignment.AlignDownPow2(remainingSize, (uint)BlockSize);
+ }
+
+ Span<byte> currentDestination = destination.Slice((int)bufferOffset, currentSize);
+
+ // If reading a single block or less, read it using the cache
+ if (currentSize <= BlockSize)
+ {
+ using var cache = new SharedCache(this);
+
+ // Get the cache for our current block
+ if (!cache.AcquireNextOverlappedCache(currentOffset, currentSize))
+ {
+ // The block wasn't in the cache. Read the block from the base storage
+ Result rc = PrepareAllocation();
+ if (rc.IsFailure()) return rc;
+
+ // Loop until we can get exclusive access to the cache block
+ while (true)
+ {
+ if (!cache.AcquireFetchableCache())
+ return ResultFs.OutOfResource.Log();
+
+ // Try to upgrade our SharedCache to a UniqueCache
+ using var fetchCache = new UniqueCache(this);
+ (Result Result, bool wasUpgradeSuccessful) upgradeResult = fetchCache.Upgrade(in cache);
+ if (upgradeResult.Result.IsFailure())
+ return upgradeResult.Result;
+
+ // Fetch the data from the base storage into the cache buffer if successful
+ if (upgradeResult.wasUpgradeSuccessful)
+ {
+ rc = fetchCache.Fetch(currentOffset);
+ if (rc.IsFailure()) return rc;
+
+ break;
+ }
+ }
+
+ rc = ControlDirtiness();
+ if (rc.IsFailure()) return rc;
+ }
+
+ // Copy the data from the cache buffer to the destination buffer
+ cache.Read(currentOffset, currentDestination);
+ }
+ // If reading multiple blocks, flush the cache entries for all those blocks and
+ // read directly from the base storage into the destination buffer in a single read.
+ else
+ {
+ // Flush all the cache blocks in the storage range being read
+ using (var cache = new SharedCache(this))
+ {
+ while (cache.AcquireNextOverlappedCache(currentOffset, currentSize))
+ {
+ Result rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+
+ cache.Invalidate();
+ }
+ }
+
+ // Read directly from the base storage to the destination buffer
+ Result rcRead = BaseStorage.Read(currentOffset, currentDestination);
+ if (rcRead.IsFailure()) return rcRead;
+ }
+
+ remainingSize -= currentSize;
+ currentOffset += currentSize;
+ bufferOffset += currentSize;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Reads as much data into the beginning of the buffer that can be found in the cache. Returns
+ /// <see langword="true"/> if the next uncached data to read from the base <see cref="IStorage"/>
+ /// is not aligned to the beginning of a block.
+ /// </summary>
+ /// <param name="offset">The storage offset at which to begin reading. When this function returns, contains
+ /// the new offset at which to begin reading if any data was read by this function.</param>
+ /// <param name="buffer">The buffer to read data into.</param>
+ /// <param name="size">The size of the data to read. When this function returns, contains the new size
+ /// if any data was read by this function.</param>
+ /// <param name="bufferOffset">The offset of the buffer to begin writing data to. When this function returns,
+ /// contains the new offset to write data to if any data was read by this function.</param>
+ /// <returns><see langword="true"/> if the remaining data to read contains a partial block at the start.
+ /// Otherwise, <see langword="false"/>.</returns>
+ private bool ReadHeadCache(ref long offset, Span<byte> buffer, ref long size, ref long bufferOffset)
+ {
+ bool isCacheNeeded = !Alignment.IsAlignedPow2(offset, (uint)BlockSize);
+
+ while (size > 0)
+ {
+ long currentSize;
+
+ if (!Alignment.IsAlignedPow2(offset, (uint)BlockSize))
+ {
+ long alignedSize = Alignment.AlignUpPow2(offset, (uint)BlockSize) - offset;
+ currentSize = Math.Min(alignedSize, size);
+ }
+ else if (size < BlockSize)
+ {
+ currentSize = size;
+ }
+ else
+ {
+ currentSize = BlockSize;
+ }
+
+ using var cache = new SharedCache(this);
+
+ if (!cache.AcquireNextOverlappedCache(offset, currentSize))
+ break;
+
+ cache.Read(offset, buffer.Slice((int)bufferOffset, (int)currentSize));
+ offset += currentSize;
+ bufferOffset += currentSize;
+ size -= currentSize;
+ isCacheNeeded = false;
+ }
+
+ return isCacheNeeded;
+ }
+
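+ /// <summary>
+ /// Reads as much data into the end of the buffer as can be found in the cache, working
+ /// backward from the end of the requested range and shrinking <paramref name="size"/> accordingly.
+ /// Returns <see langword="true"/> if the remaining data to read still contains a partial block
+ /// at the end. Otherwise, <see langword="false"/>.
+ /// </summary>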
+ private bool ReadTailCache(long offset, Span<byte> buffer, ref long size, long bufferOffset)
+ {
+ bool isCacheNeeded = !Alignment.IsAlignedPow2(offset + size, (uint)BlockSize);
+
+ while (size > 0)
+ {
+ long currentOffsetEnd = offset + size;
+ long currentSize;
+
+ if (!Alignment.IsAlignedPow2(currentOffsetEnd, (uint)BlockSize))
+ {
+ long alignedSize = currentOffsetEnd - Alignment.AlignDownPow2(currentOffsetEnd, (uint)BlockSize);
+ currentSize = Math.Min(alignedSize, size);
+ }
+ else if (size < BlockSize)
+ {
+ currentSize = size;
+ }
+ else
+ {
+ currentSize = BlockSize;
+ }
+
+ long currentOffset = currentOffsetEnd - currentSize;
+ Assert.True(currentOffset >= 0);
+
+ using var cache = new SharedCache(this);
+
+ if (!cache.AcquireNextOverlappedCache(currentOffset, currentSize))
+ break;
+
+ int currentBufferOffset = (int)(bufferOffset + currentOffset - offset);
+ cache.Read(currentOffset, buffer.Slice(currentBufferOffset, (int)currentSize));
+ size -= currentSize;
+ isCacheNeeded = false;
+ }
+
+ return isCacheNeeded;
+ }
+
+ /// <summary>
+ /// Reads directly from the base <see cref="IStorage"/> to the destination <see cref="Span{Byte}"/>
+ /// using a single read.
+ /// </summary>
+ /// <param name="offset">The offset at which to begin reading.</param>
+ /// <param name="buffer">The buffer where the read bytes will be stored.
+ /// The number of bytes read will be equal to the length of the buffer.</param>
+ /// <param name="isHeadCacheNeeded">Should the head block of the read data be cached?</param>
+ /// <param name="isTailCacheNeeded">Should the tail block of the read data be cached?</param>
+ /// <returns>The <see cref="Result"/> of the operation.</returns>
+ private Result BulkRead(long offset, Span<byte> buffer, bool isHeadCacheNeeded, bool isTailCacheNeeded)
+ {
+ Result rc;
+
+ // Determine aligned extents.
+ long alignedOffset = Alignment.AlignDownPow2(offset, (uint)BlockSize);
+ long alignedOffsetEnd = Math.Min(Alignment.AlignUpPow2(offset + buffer.Length, (uint)BlockSize),
+ BaseStorageSize);
+ long alignedSize = alignedOffsetEnd - alignedOffset;
+
+ // Allocate a work buffer if either the head or tail of the range isn't aligned.
+ // Otherwise directly use the output buffer.
+ Span<byte> workBuffer;
+ using var pooledBuffer = new PooledBuffer();
+
+ if (offset == alignedOffset && buffer.Length == alignedSize)
+ {
+ workBuffer = buffer;
+ }
+ else
+ {
+ pooledBuffer.AllocateParticularlyLarge((int)alignedSize, 1);
+ if (pooledBuffer.GetSize() < alignedSize)
+ return ResultFs.AllocationFailurePooledBufferNotEnoughSize.Log();
+
+ workBuffer = pooledBuffer.GetBuffer();
+ }
+
+ // Ensure cache is coherent.
+ using (var cache = new SharedCache(this))
+ {
+ while (cache.AcquireNextOverlappedCache(alignedOffset, alignedSize))
+ {
+ rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+
+ cache.Invalidate();
+ }
+ }
+
+ // Read from the base storage.
+ rc = BaseStorage.Read(alignedOffset, workBuffer.Slice(0, (int)alignedSize));
+ if (rc.IsFailure()) return rc;
+ if (workBuffer != buffer)
+ {
+ workBuffer.Slice((int)(offset - alignedOffset), buffer.Length).CopyTo(buffer);
+ }
+
+ bool cached = false;
+
+ // Cache the head block if needed.
+ if (isHeadCacheNeeded)
+ {
+ rc = PrepareAllocation();
+ if (rc.IsFailure()) return rc;
+
+ using var cache = new SharedCache(this);
+ while (true)
+ {
+ if (!cache.AcquireFetchableCache())
+ return ResultFs.OutOfResource.Log();
+
+ using var fetchCache = new UniqueCache(this);
+ (Result Result, bool wasUpgradeSuccessful) upgradeResult = fetchCache.Upgrade(in cache);
+ if (upgradeResult.Result.IsFailure())
+ return upgradeResult.Result;
+
+ if (upgradeResult.wasUpgradeSuccessful)
+ {
+ rc = fetchCache.FetchFromBuffer(alignedOffset, workBuffer.Slice(0, (int)alignedSize));
+ if (rc.IsFailure()) return rc;
+ break;
+ }
+ }
+
+ cached = true;
+ }
+
+ // Cache the tail block if needed.
+ if (isTailCacheNeeded && (!isHeadCacheNeeded || alignedSize > BlockSize))
+ {
+ if (!cached)
+ {
+ rc = PrepareAllocation();
+ if (rc.IsFailure()) return rc;
+ }
+
+ using var cache = new SharedCache(this);
+ while (true)
+ {
+ if (!cache.AcquireFetchableCache())
+ return ResultFs.OutOfResource.Log();
+
+ using var fetchCache = new UniqueCache(this);
+ (Result Result, bool wasUpgradeSuccessful) upgradeResult = fetchCache.Upgrade(in cache);
+ if (upgradeResult.Result.IsFailure())
+ return upgradeResult.Result;
+
+ if (upgradeResult.wasUpgradeSuccessful)
+ {
+ long tailCacheOffset = Alignment.AlignDownPow2(offset + buffer.Length, (uint)BlockSize);
+ long tailCacheSize = alignedSize - tailCacheOffset + alignedOffset;
+
+ rc = fetchCache.FetchFromBuffer(tailCacheOffset,
+ workBuffer.Slice((int)(tailCacheOffset - alignedOffset), (int)tailCacheSize));
+ if (rc.IsFailure()) return rc;
+ break;
+ }
+ }
+ }
+
+ if (cached)
+ {
+ rc = ControlDirtiness();
+ if (rc.IsFailure()) return rc;
+ }
+
+ return Result.Success;
+ }
+
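+ /// <summary>
+ /// Writes data to the base <see cref="IStorage"/>, routing partial-block writes through
+ /// the block cache and writing aligned full blocks directly after flushing any overlapping
+ /// cache entries.
+ /// </summary>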
+ private Result WriteCore(long offset, ReadOnlySpan<byte> source)
+ {
+ Assert.NotNull(Caches);
+
+ // Validate the offset.
+ long baseStorageSize = BaseStorageSize;
+
+ if (offset < 0 || baseStorageSize < offset)
+ return ResultFs.InvalidOffset.Log();
+
+ // Setup tracking variables.
+ int remainingSize = (int)Math.Min(source.Length, baseStorageSize - offset);
+ long currentOffset = offset;
+ int bufferOffset = 0;
+
+ // Repeatedly write until we're done.
+ while (remainingSize > 0)
+ {
+ // Determine how much to write this iteration.
+ ReadOnlySpan<byte> currentSource = source.Slice(bufferOffset);
+ int currentSize;
+
+ if (!Alignment.IsAlignedPow2(currentOffset, (uint)BlockSize))
+ {
+ int alignedSize = (int)(BlockSize - (currentOffset & (BlockSize - 1)));
+ currentSize = Math.Min(alignedSize, remainingSize);
+ }
+ else if (remainingSize < BlockSize)
+ {
+ currentSize = remainingSize;
+ }
+ else
+ {
+ currentSize = Alignment.AlignDownPow2(remainingSize, (uint)BlockSize);
+ }
+
+ Result rc;
+ if (currentSize < BlockSize)
+ {
+ using var cache = new SharedCache(this);
+
+ if (!cache.AcquireNextOverlappedCache(currentOffset, currentSize))
+ {
+ rc = PrepareAllocation();
+ if (rc.IsFailure()) return rc;
+
+ while (true)
+ {
+ if (!cache.AcquireFetchableCache())
+ return ResultFs.OutOfResource.Log();
+
+ using var fetchCache = new UniqueCache(this);
+ (Result Result, bool wasUpgradeSuccessful) upgradeResult = fetchCache.Upgrade(in cache);
+ if (upgradeResult.Result.IsFailure())
+ return upgradeResult.Result;
+
+ if (upgradeResult.wasUpgradeSuccessful)
+ {
+ rc = fetchCache.Fetch(currentOffset);
+ if (rc.IsFailure()) return rc;
+ break;
+ }
+ }
+ }
+ cache.Write(currentOffset, currentSource.Slice(0, currentSize));
+
+ BufferManagerUtility.EnableBlockingBufferManagerAllocation();
+
+ rc = ControlDirtiness();
+ if (rc.IsFailure()) return rc;
+ }
+ else
+ {
+ using (var cache = new SharedCache(this))
+ {
+ while (cache.AcquireNextOverlappedCache(currentOffset, currentSize))
+ {
+ rc = cache.Flush();
+ if (rc.IsFailure()) return rc;
+
+ cache.Invalidate();
+ }
+ }
+
+ rc = BaseStorage.Write(currentOffset, currentSource.Slice(0, currentSize));
+ if (rc.IsFailure()) return rc;
+
+ BufferManagerUtility.EnableBlockingBufferManagerAllocation();
+ }
+
+ remainingSize -= currentSize;
+ currentOffset += currentSize;
+ bufferOffset += currentSize;
+ }
+
+ return Result.Success;
+ }
+ }
+}
diff --git a/src/LibHac/Util/Alignment.cs b/src/LibHac/Util/Alignment.cs
index e6726bc3..c5f72718 100644
--- a/src/LibHac/Util/Alignment.cs
+++ b/src/LibHac/Util/Alignment.cs
@@ -25,7 +25,6 @@ namespace LibHac.Util
return (value & ~invMask);
}
- [MethodImpl(MethodImplOptions.NoInlining)]
public static bool IsAlignedPow2(ulong value, uint alignment)
{
Assert.True(BitUtil.IsPowerOfTwo(alignment));
@@ -34,11 +33,6 @@ namespace LibHac.Util
return (value & invMask) == 0;
}
- public static bool IsAlignedPow2(Span<byte> buffer, uint alignment)
- {
- return IsAlignedPow2(buffer, alignment);
- }
-
public static bool IsAlignedPow2(ReadOnlySpan<byte> buffer, uint alignment)
{
return IsAlignedPow2(ref MemoryMarshal.GetReference(buffer), alignment);
diff --git a/tests/LibHac.Tests/Fs/StorageTester.cs b/tests/LibHac.Tests/Fs/StorageTester.cs
new file mode 100644
index 00000000..4f0c171c
--- /dev/null
+++ b/tests/LibHac.Tests/Fs/StorageTester.cs
@@ -0,0 +1,265 @@
+using System;
+using System.IO;
+using System.Linq;
+using LibHac.Fs;
+
+namespace LibHac.Tests.Fs
+{
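+ /// <summary>
+ /// Runs a randomized sequence of reads, writes, and flushes against multiple
+ /// <see cref="IStorage"/> entries, verifying that every storage returns identical data
+ /// on reads and that all backing arrays match after a flush.
+ /// </summary>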
+ public class StorageTester
+ {
+ private Random _random;
+ private byte[][] _backingArrays;
+ private byte[][] _buffers;
+ private int _size;
+
+ private int[] _frequentAccessOffsets;
+ private int _lastAccessEnd;
+ private int _totalAccessCount;
+ private Configuration _config;
+
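+ /// <summary>
+ /// The *Probs arrays are relative weights for random selection; e.g. TaskProbs = { 50, 50, 1 }
+ /// makes reads and writes equally likely and flushes rare.
+ /// </summary>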
+ public class Configuration
+ {
+ public Entry[] Entries { get; set; }
+ public int[] SizeClassProbs { get; set; }
+ public int[] SizeClassMaxSizes { get; set; }
+ public int[] TaskProbs { get; set; }
+ public int[] AccessTypeProbs { get; set; }
+ public ulong RngSeed { get; set; }
+ public int FrequentAccessBlockCount { get; set; }
+ }
+
+ public StorageTester(Configuration config)
+ {
+ Entry[] entries = config.Entries;
+
+ if (entries.Length < 2)
+ {
+ throw new ArgumentException("At least 2 storage entries must be provided", nameof(config.Entries));
+ }
+
+ if (entries.Select(x => x.BackingArray.Length).Distinct().Count() != 1)
+ {
+ throw new ArgumentException("All storages must have the same size.", nameof(config.Entries));
+ }
+
+ if (entries[0].BackingArray.Length == 0)
+ {
+ throw new ArgumentException("The storage size must be greater than 0.", nameof(config.Entries));
+ }
+
+ _config = config;
+ _random = new Random(config.RngSeed);
+
+ _backingArrays = entries.Select(x => x.BackingArray).ToArray();
+
+ _buffers = new byte[entries.Length][];
+ for (int i = 0; i < entries.Length; i++)
+ {
+ _buffers[i] = new byte[config.SizeClassMaxSizes[^1]];
+ }
+
+ _size = entries[0].BackingArray.Length;
+ _lastAccessEnd = 0;
+
+ _frequentAccessOffsets = new int[config.FrequentAccessBlockCount];
+ for (int i = 0; i < _frequentAccessOffsets.Length; i++)
+ {
+ _frequentAccessOffsets[i] = ChooseOffset(AccessType.Random);
+ }
+ }
+
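+ /// <summary>
+ /// Performs <paramref name="accessCount"/> randomly chosen operations (read, write, or flush)
+ /// according to the configured probability weights.
+ /// </summary>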
+ public void Run(long accessCount)
+ {
+ long endCount = _totalAccessCount + accessCount;
+
+ while (_totalAccessCount < endCount)
+ {
+ Task task = ChooseTask();
+ switch (task)
+ {
+ case Task.Read:
+ RunRead();
+ break;
+ case Task.Write:
+ RunWrite();
+ break;
+ case Task.Flush:
+ RunFlush();
+ break;
+ }
+
+ _totalAccessCount++;
+ }
+ }
+
+ private void RunRead()
+ {
+ int sizeClass = ChooseSizeClass();
+ AccessType accessType = ChooseAccessType();
+ int offset = ChooseOffset(accessType);
+ int size = ChooseSize(offset, sizeClass);
+
+ for (int i = 0; i < _config.Entries.Length; i++)
+ {
+ Entry entry = _config.Entries[i];
+ entry.Storage.Read(offset, _buffers[i].AsSpan(0, size)).ThrowIfFailure();
+ }
+
+ if (!CompareBuffers(_buffers, size))
+ {
+ throw new InvalidDataException($"Read: Offset {offset}; Size {size}");
+ }
+ }
+
+ private void RunWrite()
+ {
+ int sizeClass = ChooseSizeClass();
+ AccessType accessType = ChooseAccessType();
+ int offset = ChooseOffset(accessType);
+ int size = ChooseSize(offset, sizeClass);
+
+ Span<byte> buffer = _buffers[0].AsSpan(0, size);
+ _random.NextBytes(buffer);
+
+ for (int i = 0; i < _config.Entries.Length; i++)
+ {
+ Entry entry = _config.Entries[i];
+ entry.Storage.Write(offset, buffer).ThrowIfFailure();
+ }
+ }
+
+ private void RunFlush()
+ {
+ foreach (Entry entry in _config.Entries)
+ {
+ entry.Storage.Flush().ThrowIfFailure();
+ }
+
+ if (!CompareBuffers(_backingArrays, _size))
+ {
+ throw new InvalidDataException("Flush");
+ }
+ }
+
+ private Task ChooseTask() => (Task)ChooseProb(_config.TaskProbs);
+ private int ChooseSizeClass() => ChooseProb(_config.SizeClassProbs);
+ private AccessType ChooseAccessType() => (AccessType)ChooseProb(_config.AccessTypeProbs);
+
+ private int ChooseOffset(AccessType type) => type switch
+ {
+ AccessType.Random => _random.Next(0, _size),
+ AccessType.Sequential => _lastAccessEnd == _size ? 0 : _lastAccessEnd,
+ AccessType.FrequentBlock => _frequentAccessOffsets[_random.Next(0, _frequentAccessOffsets.Length)],
+ _ => 0
+ };
+
+ private int ChooseSize(int offset, int sizeClass)
+ {
+ int availableSize = Math.Max(0, _size - offset);
+ int randSize = _random.Next(0, _config.SizeClassMaxSizes[sizeClass]);
+ return Math.Min(availableSize, randSize);
+ }
+
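+ // Weighted random selection: returns index i with probability weights[i] / sum(weights).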
+ private int ChooseProb(int[] weights)
+ {
+ int total = 0;
+ foreach (int weight in weights)
+ {
+ total += weight;
+ }
+
+ int rand = _random.Next(0, total);
+ int currentThreshold = 0;
+
+ for (int i = 0; i < weights.Length; i++)
+ {
+ currentThreshold += weights[i];
+
+ if (rand < currentThreshold)
+ return i;
+ }
+
+ return 0;
+ }
+
+ private bool CompareBuffers(byte[][] buffers, int size)
+ {
+ Span<byte> baseBuffer = buffers[0].AsSpan(0, size);
+
+ for (int i = 1; i < buffers.Length; i++)
+ {
+ Span<byte> testBuffer = buffers[i].AsSpan(0, size);
+ if (!baseBuffer.SequenceEqual(testBuffer))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public readonly struct Entry
+ {
+ public readonly IStorage Storage;
+ public readonly byte[] BackingArray;
+
+ public Entry(IStorage storage, byte[] backingArray)
+ {
+ Storage = storage;
+ BackingArray = backingArray;
+ }
+ }
+
+ private enum Task
+ {
+ Read = 0,
+ Write = 1,
+ Flush = 2
+ }
+
+ private enum AccessType
+ {
+ Random = 0,
+ Sequential = 1,
+ FrequentBlock = 2
+ }
+ }
+}
diff --git a/tests/LibHac.Tests/FsSystem/BufferedStorageTests.cs b/tests/LibHac.Tests/FsSystem/BufferedStorageTests.cs
new file mode 100644
index 00000000..8cc94ded
--- /dev/null
+++ b/tests/LibHac.Tests/FsSystem/BufferedStorageTests.cs
@@ -0,0 +1,229 @@
+using System;
+using System.Collections.Generic;
+using LibHac.Fs;
+using LibHac.FsSystem;
+using LibHac.FsSystem.Save;
+using LibHac.Tests.Fs;
+using Xunit;
+
+namespace LibHac.Tests.FsSystem
+{
+ public class BufferedStorageTests
+ {
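+ // Writes a buffer smaller than one cache block and verifies the data can be read back
+ // through the same BufferedStorage.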
+ [Fact]
+ public void Write_SingleBlock_CanReadBack()
+ {
+ byte[] buffer = new byte[0x18000];
+ byte[] workBuffer = new byte[0x18000];
+ var bufferManager = new FileSystemBufferManager();
+ Assert.Success(bufferManager.Initialize(5, buffer, 0x4000, workBuffer));
+
+ byte[] storageBuffer = new byte[0x80000];
+ var baseStorage = new SubStorage(new MemoryStorage(storageBuffer), 0, storageBuffer.Length);
+
+ var bufferedStorage = new BufferedStorage();
+ Assert.Success(bufferedStorage.Initialize(baseStorage, bufferManager, 0x4000, 4));
+
+ byte[] writeBuffer = new byte[0x400];
+ byte[] readBuffer = new byte[0x400];
+
+ writeBuffer.AsSpan().Fill(0xAA);
+ Assert.Success(bufferedStorage.Write(0x10000, writeBuffer));
+ Assert.Success(bufferedStorage.Read(0x10000, readBuffer));
+
+ Assert.Equal(writeBuffer, readBuffer);
+ }
+
+ public class AccessTestConfig
+ {
+ public int[] SizeClassProbs { get; set; }
+ public int[] SizeClassMaxSizes { get; set; }
+ public int[] TaskProbs { get; set; }
+ public int[] AccessTypeProbs { get; set; }
+ public ulong RngSeed { get; set; }
+ public int FrequentAccessBlockCount { get; set; }
+ public int BlockSize { get; set; }
+ public int StorageCacheCount { get; set; }
+ public bool EnableBulkRead { get; set; }
+ public int StorageSize { get; set; }
+ public int HeapSize { get; set; }
+ public int HeapBlockSize { get; set; }
+ public int BufferManagerCacheCount { get; set; }
+ }
+
+
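+ // Parameter sets for the randomized access test, varying cache counts, storage and heap
+ // sizes, access patterns, and whether bulk reads are enabled.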
+ public static AccessTestConfig[] AccessTestConfigs =
+ {
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 5},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 50, 1}, // Read, Write, Flush
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 35467,
+ FrequentAccessBlockCount = 6,
+ BlockSize = 0x4000,
+ StorageCacheCount = 40,
+ EnableBulkRead = true,
+ StorageSize = 0x1000000,
+ HeapSize = 0x180000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 50
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 5},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 50, 1}, // Read, Write, Flush
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 6548433,
+ FrequentAccessBlockCount = 6,
+ BlockSize = 0x4000,
+ StorageCacheCount = 40,
+ EnableBulkRead = false,
+ StorageSize = 0x1000000,
+ HeapSize = 0x180000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 50
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 0},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 0, 0},
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 756478,
+ FrequentAccessBlockCount = 16,
+ BlockSize = 0x4000,
+ StorageCacheCount = 8,
+ EnableBulkRead = true,
+ StorageSize = 0x1000000,
+ HeapSize = 0xE00000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 0x400
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 0},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 0, 0},
+ AccessTypeProbs = new[] {0, 0, 5}, // Random, Sequential, Frequent block
+ RngSeed = 38197549,
+ FrequentAccessBlockCount = 16,
+ BlockSize = 0x4000,
+ StorageCacheCount = 16,
+ EnableBulkRead = false,
+ StorageSize = 0x1000000,
+ HeapSize = 0xE00000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 0x400
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 0},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 50, 1}, // Read, Write, Flush
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 567365,
+ FrequentAccessBlockCount = 6,
+ BlockSize = 0x4000,
+ StorageCacheCount = 8,
+ EnableBulkRead = false,
+ StorageSize = 0x100000,
+ HeapSize = 0x180000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 50
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 0},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 50, 1}, // Read, Write, Flush
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 949365,
+ FrequentAccessBlockCount = 6,
+ BlockSize = 0x4000,
+ StorageCacheCount = 8,
+ EnableBulkRead = false,
+ StorageSize = 0x100000,
+ HeapSize = 0x180000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 50
+ },
+ new()
+ {
+ SizeClassProbs = new[] {50, 50, 10},
+ SizeClassMaxSizes = new[] {0x4000, 0x80000, 0x800000}, // 4 KB, 512 KB, 8 MB
+ TaskProbs = new[] {50, 50, 1}, // Read, Write, Flush
+ AccessTypeProbs = new[] {10, 10, 5}, // Random, Sequential, Frequent block
+ RngSeed = 670670,
+ FrequentAccessBlockCount = 16,
+ BlockSize = 0x4000,
+ StorageCacheCount = 8,
+ EnableBulkRead = true,
+ StorageSize = 0x1000000,
+ HeapSize = 0xE00000,
+ HeapBlockSize = 0x4000,
+ BufferManagerCacheCount = 0x400
+ }
+ };
+
+ private static TheoryData<T> CreateTheoryData<T>(IEnumerable<T> items)
+ {
+ var output = new TheoryData<T>();
+
+ foreach (T item in items)
+ {
+ output.Add(item);
+ }
+
+ return output;
+ }
+
+ public static TheoryData<AccessTestConfig> AccessTestTheoryData = CreateTheoryData(AccessTestConfigs);
+
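+ // Runs the same randomized operation sequence against a plain MemoryStorage and a
+ // BufferedStorage wrapping a MemoryStorage, checking that their contents never diverge.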
+ [Theory]
+ [MemberData(nameof(AccessTestTheoryData))]
+ public void ReadWrite_AccessCorrectnessTestAgainstMemoryStorage(AccessTestConfig config)
+ {
+ int orderMax = FileSystemBuddyHeap.QueryOrderMax((nuint)config.HeapSize, (nuint)config.HeapBlockSize);
+ int workBufferSize = (int)FileSystemBuddyHeap.QueryWorkBufferSize(orderMax);
+ byte[] workBuffer = GC.AllocateArray<byte>(workBufferSize, true);
+ byte[] heapBuffer = new byte[config.HeapSize];
+
+ var bufferManager = new FileSystemBufferManager();
+ Assert.Success(bufferManager.Initialize(config.BufferManagerCacheCount, heapBuffer, config.HeapBlockSize, workBuffer));
+
+ byte[] memoryStorageArray = new byte[config.StorageSize];
+ byte[] bufferedStorageArray = new byte[config.StorageSize];
+
+ var memoryStorage = new MemoryStorage(memoryStorageArray);
+ var baseBufferedStorage = new SubStorage(new MemoryStorage(bufferedStorageArray), 0, bufferedStorageArray.Length);
+
+ var bufferedStorage = new BufferedStorage();
+ Assert.Success(bufferedStorage.Initialize(baseBufferedStorage, bufferManager, config.BlockSize, config.StorageCacheCount));
+
+ if (config.EnableBulkRead)
+ {
+ bufferedStorage.EnableBulkRead();
+ }
+
+ var memoryStorageEntry = new StorageTester.Entry(memoryStorage, memoryStorageArray);
+ var bufferedStorageEntry = new StorageTester.Entry(bufferedStorage, bufferedStorageArray);
+
+ var testerConfig = new StorageTester.Configuration()
+ {
+ Entries = new[] { memoryStorageEntry, bufferedStorageEntry },
+ SizeClassProbs = config.SizeClassProbs,
+ SizeClassMaxSizes = config.SizeClassMaxSizes,
+ TaskProbs = config.TaskProbs,
+ AccessTypeProbs = config.AccessTypeProbs,
+ RngSeed = config.RngSeed,
+ FrequentAccessBlockCount = config.FrequentAccessBlockCount
+ };
+
+ var tester = new StorageTester(testerConfig);
+ tester.Run(0x100);
+ }
+ }
+}
diff --git a/tests/LibHac.Tests/FsSystem/FileSystemBufferManagerTests.cs b/tests/LibHac.Tests/FsSystem/FileSystemBufferManagerTests.cs
new file mode 100644
index 00000000..220bad7a
--- /dev/null
+++ b/tests/LibHac.Tests/FsSystem/FileSystemBufferManagerTests.cs
@@ -0,0 +1,89 @@
+using LibHac.Fs;
+using LibHac.FsSystem;
+using Xunit;
+
+namespace LibHac.Tests.FsSystem
+{
+ public class FileSystemBufferManagerTests
+ {
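+ // Builds a FileSystemBufferManager backed by a buddy heap of the given size and block size.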
+ private FileSystemBufferManager CreateManager(int size, int blockSize = 0x4000, int maxCacheCount = 16)
+ {
+ int orderMax = FileSystemBuddyHeap.QueryOrderMax((nuint)size, (nuint)blockSize);
+ nuint workBufferSize = FileSystemBuddyHeap.QueryWorkBufferSize(orderMax);
+ byte[] workBuffer = new byte[workBufferSize];
+ byte[] heapBuffer = new byte[size];
+
+ var bufferManager = new FileSystemBufferManager();
+ Assert.Success(bufferManager.Initialize(maxCacheCount, heapBuffer, blockSize, workBuffer));
+ return bufferManager;
+ }
+
+ [Fact]
+ public void AllocateBuffer_NoFreeSpace_ReturnsNull()
+ {
+ FileSystemBufferManager manager = CreateManager(0x20000);
+ Buffer buffer1 = manager.AllocateBuffer(0x10000);
+ Buffer buffer2 = manager.AllocateBuffer(0x10000);
+ Buffer buffer3 = manager.AllocateBuffer(0x4000);
+
+ Assert.True(!buffer1.IsNull);
+ Assert.True(!buffer2.IsNull);
+ Assert.True(buffer3.IsNull);
+ }
+
+ [Fact]
+ public void AcquireCache_EntryNotEvicted_ReturnsEntry()
+ {
+ FileSystemBufferManager manager = CreateManager(0x20000);
+ Buffer buffer1 = manager.AllocateBuffer(0x10000);
+
+ long handle = manager.RegisterCache(buffer1, new IBufferManager.BufferAttribute());
+
+ manager.AllocateBuffer(0x10000);
+ Buffer buffer3 = manager.AcquireCache(handle);
+
+ Assert.Equal(buffer1, buffer3);
+ }
+
+ [Fact]
+ public void AcquireCache_EntryEvicted_ReturnsNull()
+ {
+ FileSystemBufferManager manager = CreateManager(0x20000);
+ Buffer buffer1 = manager.AllocateBuffer(0x10000);
+
+ long handle = manager.RegisterCache(buffer1, new IBufferManager.BufferAttribute());
+
+ manager.AllocateBuffer(0x20000);
+ Buffer buffer3 = manager.AcquireCache(handle);
+
+ Assert.True(buffer3.IsNull);
+ }
+
+ [Fact]
+ public void AcquireCache_MultipleEntriesEvicted_OldestAreEvicted()
+ {
+ FileSystemBufferManager manager = CreateManager(0x20000);
+ Buffer buffer1 = manager.AllocateBuffer(0x8000);
+ Buffer buffer2 = manager.AllocateBuffer(0x8000);
+ Buffer buffer3 = manager.AllocateBuffer(0x8000);
+ Buffer buffer4 = manager.AllocateBuffer(0x8000);
+
+ long handle1 = manager.RegisterCache(buffer1, new IBufferManager.BufferAttribute());
+ long handle2 = manager.RegisterCache(buffer2, new IBufferManager.BufferAttribute());
+ long handle3 = manager.RegisterCache(buffer3, new IBufferManager.BufferAttribute());
+ long handle4 = manager.RegisterCache(buffer4, new IBufferManager.BufferAttribute());
+
+ manager.AllocateBuffer(0x10000);
+
+ Buffer buffer1B = manager.AcquireCache(handle1);
+ Buffer buffer2B = manager.AcquireCache(handle2);
+ Buffer buffer3B = manager.AcquireCache(handle3);
+ Buffer buffer4B = manager.AcquireCache(handle4);
+
+ Assert.True(buffer1B.IsNull);
+ Assert.True(buffer2B.IsNull);
+ Assert.Equal(buffer3, buffer3B);
+ Assert.Equal(buffer4, buffer4B);
+ }
+ }
+}
diff --git a/tests/LibHac.Tests/LibHac.Tests.csproj b/tests/LibHac.Tests/LibHac.Tests.csproj
index 34a882f0..bc0e1136 100644
--- a/tests/LibHac.Tests/LibHac.Tests.csproj
+++ b/tests/LibHac.Tests/LibHac.Tests.csproj
@@ -2,6 +2,7 @@
net5.0
+ true
false
diff --git a/tests/LibHac.Tests/Random.cs b/tests/LibHac.Tests/Random.cs
new file mode 100644
index 00000000..32e66999
--- /dev/null
+++ b/tests/LibHac.Tests/Random.cs
@@ -0,0 +1,65 @@
+using System;
+using System.Numerics;
+using System.Runtime.InteropServices;
+
+namespace LibHac.Tests
+{
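+ /// <summary>
+ /// Deterministic pseudo-random number generator for tests. Uses the xoroshiro128++ state
+ /// update, seeded by running splitmix64 twice over the provided seed.
+ /// </summary>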
+ public struct Random
+ {
+ private ulong _state1;
+ private ulong _state2;
+
+ public Random(ulong seed)
+ {
+ ulong x = seed;
+ ulong z = x + 0x9e3779b97f4a7c15;
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ x = z ^ (z >> 31);
+ z = (x += 0x9e3779b97f4a7c15);
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ _state1 = z ^ (z >> 31);
+ _state2 = x;
+ }
+
+ ulong Next()
+ {
+ ulong s0 = _state1;
+ ulong s1 = _state2;
+ ulong result = BitOperations.RotateLeft(s0 + s1, 17) + s0;
+
+ s1 ^= s0;
+ _state1 = BitOperations.RotateLeft(s0, 49) ^ s1 ^ (s1 << 21);
+ _state2 = BitOperations.RotateLeft(s1, 28);
+
+ return result;
+ }
+
+ public int Next(int minValue, int maxValue)
+ {
+ if (minValue > maxValue)
+ {
+ throw new ArgumentOutOfRangeException(nameof(minValue));
+ }
+
+ long range = (long)maxValue - minValue;
+ return (int)((uint)Next() * (1.0 / uint.MaxValue) * range) + minValue;
+ }
+
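+ // Fill the buffer eight bytes at a time, then fill any remaining bytes individually.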
+ public void NextBytes(Span<byte> buffer)
+ {
+ Span<ulong> bufferUlong = MemoryMarshal.Cast<byte, ulong>(buffer);
+
+ for (int i = 0; i < bufferUlong.Length; i++)
+ {
+ bufferUlong[i] = Next();
+ }
+
+ for (int i = bufferUlong.Length * sizeof(ulong); i < buffer.Length; i++)
+ {
+ buffer[i] = (byte)Next();
+ }
+ }
+ }
+}