mirror of
https://github.com/Thealexbarney/LibHac.git
synced 2025-02-09 13:14:46 +01:00
Use Storage throughout the library instead of Stream (#18)
*Create an IStorage interface and Storage abstract class to use instead of Stream * Improve AES-XTS performance by ~16x * Double AES-CTR performance: 800 MB/s -> 1600 MB/s on a 6700K * Add AES-XTS tests * Add AES benchmark and AES-CTR writing * Add support for a hashed FAT in save files * Add option to export decrypted NCA * Allow opening decrypted package1 and package2 * Make sure romfs disposal can cascade all the way down * Validate NCA, NPDM and package2 signatures
This commit is contained in:
parent
f20fac5459
commit
3d50085e22
182
LICENSE
182
LICENSE
@ -69,185 +69,3 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
=====================
|
||||
Combination Stream
|
||||
https://github.com/facebook-csharp-sdk/combination-stream
|
||||
=====================
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
@ -4,8 +4,7 @@ using DiscUtils;
|
||||
using DiscUtils.Fat;
|
||||
using DiscUtils.Partitions;
|
||||
using DiscUtils.Streams;
|
||||
using LibHac.Streams;
|
||||
using LibHac.XTSSharp;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac.Nand
|
||||
{
|
||||
@ -32,45 +31,45 @@ namespace LibHac.Nand
|
||||
|
||||
public Stream OpenProdInfo()
|
||||
{
|
||||
SparseStream encStream = ProdInfo.Open();
|
||||
Xts xts = XtsAes128.Create(Keyset.BisKeys[0]);
|
||||
var decStream = new RandomAccessSectorStream(new XtsSectorStream(encStream, xts, 0x4000, 0), true);
|
||||
return decStream;
|
||||
IStorage encStorage = ProdInfo.Open().AsStorage();
|
||||
var decStorage = new CachedStorage(new Aes128XtsStorage(encStorage, Keyset.BisKeys[0], 0x4000, true), 0x4000, 4, true);
|
||||
decStorage.SetReadOnly();
|
||||
return decStorage.AsStream();
|
||||
}
|
||||
|
||||
public NandPartition OpenProdInfoF()
|
||||
{
|
||||
SparseStream encStream = ProdInfoF.Open();
|
||||
Xts xts = XtsAes128.Create(Keyset.BisKeys[0]);
|
||||
var decStream = new RandomAccessSectorStream(new XtsSectorStream(encStream, xts, 0x4000, 0), true);
|
||||
var fat = new FatFileSystem(decStream, Ownership.None);
|
||||
IStorage encStorage = ProdInfoF.Open().AsStorage();
|
||||
var decStorage = new CachedStorage(new Aes128XtsStorage(encStorage, Keyset.BisKeys[0], 0x4000, true), 0x4000, 4, true);
|
||||
decStorage.SetReadOnly();
|
||||
var fat = new FatFileSystem(decStorage.AsStream(), Ownership.None);
|
||||
return new NandPartition(fat);
|
||||
}
|
||||
|
||||
public NandPartition OpenSafePartition()
|
||||
{
|
||||
SparseStream encStream = Safe.Open();
|
||||
Xts xts = XtsAes128.Create(Keyset.BisKeys[1]);
|
||||
var decStream = new RandomAccessSectorStream(new XtsSectorStream(encStream, xts, 0x4000, 0), true);
|
||||
var fat = new FatFileSystem(decStream, Ownership.None);
|
||||
IStorage encStorage = Safe.Open().AsStorage();
|
||||
var decStorage = new CachedStorage(new Aes128XtsStorage(encStorage, Keyset.BisKeys[1], 0x4000, true), 0x4000, 4, true);
|
||||
decStorage.SetReadOnly();
|
||||
var fat = new FatFileSystem(decStorage.AsStream(), Ownership.None);
|
||||
return new NandPartition(fat);
|
||||
}
|
||||
|
||||
public NandPartition OpenSystemPartition()
|
||||
{
|
||||
SparseStream encStream = System.Open();
|
||||
Xts xts = XtsAes128.Create(Keyset.BisKeys[2]);
|
||||
var decStream = new RandomAccessSectorStream(new XtsSectorStream(encStream, xts, 0x4000, 0), true);
|
||||
var fat = new FatFileSystem(decStream, Ownership.None);
|
||||
IStorage encStorage = System.Open().AsStorage();
|
||||
var decStorage = new CachedStorage(new Aes128XtsStorage(encStorage, Keyset.BisKeys[2], 0x4000, true), 0x4000, 4, true);
|
||||
decStorage.SetReadOnly();
|
||||
var fat = new FatFileSystem(decStorage.AsStream(), Ownership.None);
|
||||
return new NandPartition(fat);
|
||||
}
|
||||
|
||||
public NandPartition OpenUserPartition()
|
||||
{
|
||||
SparseStream encStream = User.Open();
|
||||
Xts xts = XtsAes128.Create(Keyset.BisKeys[3]);
|
||||
var decStream = new RandomAccessSectorStream(new XtsSectorStream(encStream, xts, 0x4000, 0), true);
|
||||
var fat = new FatFileSystem(decStream, Ownership.None);
|
||||
IStorage encStorage = User.Open().AsStorage();
|
||||
var decStorage = new CachedStorage(new Aes128XtsStorage(encStorage, Keyset.BisKeys[3], 0x4000, true), 0x4000, 4, true);
|
||||
decStorage.SetReadOnly();
|
||||
var fat = new FatFileSystem(decStorage.AsStream(), Ownership.None);
|
||||
return new NandPartition(fat);
|
||||
}
|
||||
}
|
||||
|
255
LibHac.Tests/AesXts.cs
Normal file
255
LibHac.Tests/AesXts.cs
Normal file
@ -0,0 +1,255 @@
|
||||
using System.Linq;
|
||||
using LibHac.IO;
|
||||
using Xunit;
|
||||
|
||||
namespace LibHac.Tests
|
||||
{
|
||||
public class AesXts
|
||||
{
|
||||
private static readonly TestData[] TestVectors =
|
||||
{
|
||||
// #1 32 byte key, 32 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
Key2 = new byte[]
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
Sector = 0,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x91, 0x7C, 0xF6, 0x9E, 0xBD, 0x68, 0xB2, 0xEC, 0x9B, 0x9F, 0xE9, 0xA3, 0xEA, 0xDD, 0xA6, 0x92,
|
||||
0xCD, 0x43, 0xD2, 0xF5, 0x95, 0x98, 0xED, 0x85, 0x8C, 0x02, 0xC2, 0x65, 0x2F, 0xBF, 0x92, 0x2E
|
||||
}
|
||||
},
|
||||
|
||||
// #2, 32 byte key, 32 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
Key2 = new byte[]
|
||||
{0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22},
|
||||
Sector = 0x3333333333,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
|
||||
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x44, 0xBE, 0xC8, 0x2F, 0xFB, 0x76, 0xAE, 0xFD, 0xFB, 0xC9, 0x6D, 0xFE, 0x61, 0xE1, 0x92, 0xCC,
|
||||
0xFA, 0x22, 0x13, 0x67, 0x7C, 0x8F, 0x4F, 0xD6, 0xE4, 0xF1, 0x8F, 0x7E, 0xBB, 0x69, 0x38, 0x2F
|
||||
},
|
||||
},
|
||||
|
||||
// #5 from xts.7, 32 byte key, 32 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0},
|
||||
Key2 = new byte[]
|
||||
{0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8, 0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0},
|
||||
Sector = 0x123456789A,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
|
||||
0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0xC1, 0x18, 0x39, 0xD6, 0x36, 0xAD, 0x8B, 0xE5, 0xA1, 0x16, 0xE4, 0x8C, 0x70, 0x22, 0x77, 0x63,
|
||||
0xDA, 0xBD, 0x3C, 0x2D, 0x13, 0x83, 0xC5, 0xDD, 0x15, 0xB2, 0x57, 0x2A, 0xAA, 0x99, 0x2C, 0x40,
|
||||
},
|
||||
},
|
||||
|
||||
// #4, 32 byte key, 512 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26},
|
||||
Key2 = new byte[]
|
||||
{0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95},
|
||||
Sector = 0,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
|
||||
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
|
||||
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
|
||||
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
|
||||
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
|
||||
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
|
||||
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
|
||||
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
|
||||
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
|
||||
0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
|
||||
0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
|
||||
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
|
||||
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
|
||||
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
|
||||
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
|
||||
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
|
||||
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
|
||||
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
|
||||
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
|
||||
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
|
||||
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
|
||||
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
|
||||
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
|
||||
0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
|
||||
0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
|
||||
0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
|
||||
0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
|
||||
0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
|
||||
0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x27, 0xA7, 0x47, 0x9B, 0xEF, 0xA1, 0xD4, 0x76, 0x48, 0x9F, 0x30, 0x8C, 0xD4, 0xCF, 0xA6, 0xE2,
|
||||
0xA9, 0x6E, 0x4B, 0xBE, 0x32, 0x08, 0xFF, 0x25, 0x28, 0x7D, 0xD3, 0x81, 0x96, 0x16, 0xE8, 0x9C,
|
||||
0xC7, 0x8C, 0xF7, 0xF5, 0xE5, 0x43, 0x44, 0x5F, 0x83, 0x33, 0xD8, 0xFA, 0x7F, 0x56, 0x00, 0x00,
|
||||
0x05, 0x27, 0x9F, 0xA5, 0xD8, 0xB5, 0xE4, 0xAD, 0x40, 0xE7, 0x36, 0xDD, 0xB4, 0xD3, 0x54, 0x12,
|
||||
0x32, 0x80, 0x63, 0xFD, 0x2A, 0xAB, 0x53, 0xE5, 0xEA, 0x1E, 0x0A, 0x9F, 0x33, 0x25, 0x00, 0xA5,
|
||||
0xDF, 0x94, 0x87, 0xD0, 0x7A, 0x5C, 0x92, 0xCC, 0x51, 0x2C, 0x88, 0x66, 0xC7, 0xE8, 0x60, 0xCE,
|
||||
0x93, 0xFD, 0xF1, 0x66, 0xA2, 0x49, 0x12, 0xB4, 0x22, 0x97, 0x61, 0x46, 0xAE, 0x20, 0xCE, 0x84,
|
||||
0x6B, 0xB7, 0xDC, 0x9B, 0xA9, 0x4A, 0x76, 0x7A, 0xAE, 0xF2, 0x0C, 0x0D, 0x61, 0xAD, 0x02, 0x65,
|
||||
0x5E, 0xA9, 0x2D, 0xC4, 0xC4, 0xE4, 0x1A, 0x89, 0x52, 0xC6, 0x51, 0xD3, 0x31, 0x74, 0xBE, 0x51,
|
||||
0xA1, 0x0C, 0x42, 0x11, 0x10, 0xE6, 0xD8, 0x15, 0x88, 0xED, 0xE8, 0x21, 0x03, 0xA2, 0x52, 0xD8,
|
||||
0xA7, 0x50, 0xE8, 0x76, 0x8D, 0xEF, 0xFF, 0xED, 0x91, 0x22, 0x81, 0x0A, 0xAE, 0xB9, 0x9F, 0x91,
|
||||
0x72, 0xAF, 0x82, 0xB6, 0x04, 0xDC, 0x4B, 0x8E, 0x51, 0xBC, 0xB0, 0x82, 0x35, 0xA6, 0xF4, 0x34,
|
||||
0x13, 0x32, 0xE4, 0xCA, 0x60, 0x48, 0x2A, 0x4B, 0xA1, 0xA0, 0x3B, 0x3E, 0x65, 0x00, 0x8F, 0xC5,
|
||||
0xDA, 0x76, 0xB7, 0x0B, 0xF1, 0x69, 0x0D, 0xB4, 0xEA, 0xE2, 0x9C, 0x5F, 0x1B, 0xAD, 0xD0, 0x3C,
|
||||
0x5C, 0xCF, 0x2A, 0x55, 0xD7, 0x05, 0xDD, 0xCD, 0x86, 0xD4, 0x49, 0x51, 0x1C, 0xEB, 0x7E, 0xC3,
|
||||
0x0B, 0xF1, 0x2B, 0x1F, 0xA3, 0x5B, 0x91, 0x3F, 0x9F, 0x74, 0x7A, 0x8A, 0xFD, 0x1B, 0x13, 0x0E,
|
||||
0x94, 0xBF, 0xF9, 0x4E, 0xFF, 0xD0, 0x1A, 0x91, 0x73, 0x5C, 0xA1, 0x72, 0x6A, 0xCD, 0x0B, 0x19,
|
||||
0x7C, 0x4E, 0x5B, 0x03, 0x39, 0x36, 0x97, 0xE1, 0x26, 0x82, 0x6F, 0xB6, 0xBB, 0xDE, 0x8E, 0xCC,
|
||||
0x1E, 0x08, 0x29, 0x85, 0x16, 0xE2, 0xC9, 0xED, 0x03, 0xFF, 0x3C, 0x1B, 0x78, 0x60, 0xF6, 0xDE,
|
||||
0x76, 0xD4, 0xCE, 0xCD, 0x94, 0xC8, 0x11, 0x98, 0x55, 0xEF, 0x52, 0x97, 0xCA, 0x67, 0xE9, 0xF3,
|
||||
0xE7, 0xFF, 0x72, 0xB1, 0xE9, 0x97, 0x85, 0xCA, 0x0A, 0x7E, 0x77, 0x20, 0xC5, 0xB3, 0x6D, 0xC6,
|
||||
0xD7, 0x2C, 0xAC, 0x95, 0x74, 0xC8, 0xCB, 0xBC, 0x2F, 0x80, 0x1E, 0x23, 0xE5, 0x6F, 0xD3, 0x44,
|
||||
0xB0, 0x7F, 0x22, 0x15, 0x4B, 0xEB, 0xA0, 0xF0, 0x8C, 0xE8, 0x89, 0x1E, 0x64, 0x3E, 0xD9, 0x95,
|
||||
0xC9, 0x4D, 0x9A, 0x69, 0xC9, 0xF1, 0xB5, 0xF4, 0x99, 0x02, 0x7A, 0x78, 0x57, 0x2A, 0xEE, 0xBD,
|
||||
0x74, 0xD2, 0x0C, 0xC3, 0x98, 0x81, 0xC2, 0x13, 0xEE, 0x77, 0x0B, 0x10, 0x10, 0xE4, 0xBE, 0xA7,
|
||||
0x18, 0x84, 0x69, 0x77, 0xAE, 0x11, 0x9F, 0x7A, 0x02, 0x3A, 0xB5, 0x8C, 0xCA, 0x0A, 0xD7, 0x52,
|
||||
0xAF, 0xE6, 0x56, 0xBB, 0x3C, 0x17, 0x25, 0x6A, 0x9F, 0x6E, 0x9B, 0xF1, 0x9F, 0xDD, 0x5A, 0x38,
|
||||
0xFC, 0x82, 0xBB, 0xE8, 0x72, 0xC5, 0x53, 0x9E, 0xDB, 0x60, 0x9E, 0xF4, 0xF7, 0x9C, 0x20, 0x3E,
|
||||
0xBB, 0x14, 0x0F, 0x2E, 0x58, 0x3C, 0xB2, 0xAD, 0x15, 0xB4, 0xAA, 0x5B, 0x65, 0x50, 0x16, 0xA8,
|
||||
0x44, 0x92, 0x77, 0xDB, 0xD4, 0x77, 0xEF, 0x2C, 0x8D, 0x6C, 0x01, 0x7D, 0xB7, 0x38, 0xB1, 0x8D,
|
||||
0xEB, 0x4A, 0x42, 0x7D, 0x19, 0x23, 0xCE, 0x3F, 0xF2, 0x62, 0x73, 0x57, 0x79, 0xA4, 0x18, 0xF2,
|
||||
0x0A, 0x28, 0x2D, 0xF9, 0x20, 0x14, 0x7B, 0xEA, 0xBE, 0x42, 0x1E, 0xE5, 0x31, 0x9D, 0x05, 0x68
|
||||
},
|
||||
},
|
||||
|
||||
// #7, 32 byte key, 17 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0},
|
||||
Key2 = new byte[]
|
||||
{0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8, 0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0},
|
||||
Sector = 0x123456789A,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x9E, 0x61, 0x71, 0x58, 0x09, 0xA7, 0x4B, 0x7E, 0x0E, 0xF0, 0x33, 0xCD, 0x86, 0x18, 0x14, 0x04, 0xC2
|
||||
},
|
||||
},
|
||||
|
||||
// #15, 32 byte key, 25 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0},
|
||||
Key2 = new byte[]
|
||||
{0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8, 0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0},
|
||||
Sector = 0x123456789A,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x5D, 0x0B, 0x4A, 0x86, 0xEC, 0x5A, 0x91, 0xFB, 0x84, 0x9D, 0x0F, 0x82, 0x6A, 0x31, 0x62, 0x22,
|
||||
0xC2, 0x74, 0xAD, 0x93, 0xFC, 0x68, 0xC2, 0xC1, 0x01
|
||||
},
|
||||
},
|
||||
|
||||
// #21, 32 byte key, 31 byte PTX
|
||||
new TestData
|
||||
{
|
||||
Key1 = new byte[]
|
||||
{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0},
|
||||
Key2 = new byte[]
|
||||
{0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8, 0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0},
|
||||
Sector = 0x123456789A,
|
||||
PlainText = new byte[]
|
||||
{
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E
|
||||
},
|
||||
CipherText = new byte[]
|
||||
{
|
||||
0x42, 0x67, 0x3C, 0x89, 0x7D, 0x4F, 0x53, 0x2C, 0xF8, 0xAA, 0x65, 0xEE, 0xB4, 0xD5, 0xB6, 0xF5,
|
||||
0xC2, 0x74, 0xAD, 0x93, 0xFC, 0x68, 0xC2, 0xC1, 0x01, 0x5D, 0x90, 0x4F, 0x33, 0xFF, 0x95
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
[Theory]
|
||||
[InlineData(0)]
|
||||
[InlineData(1)]
|
||||
[InlineData(2)]
|
||||
[InlineData(3)]
|
||||
[InlineData(4)]
|
||||
[InlineData(5)]
|
||||
[InlineData(6)]
|
||||
public static void Encrypt(int index)
|
||||
{
|
||||
TestData data = TestVectors[index];
|
||||
var transform = new Aes128XtsTransform(data.Key1, data.Key2, false);
|
||||
byte[] encrypted = data.PlainText.ToArray();
|
||||
|
||||
transform.TransformBlock(encrypted, 0, encrypted.Length, data.Sector);
|
||||
Assert.Equal(data.CipherText, encrypted);
|
||||
}
|
||||
|
||||
[Theory]
|
||||
[InlineData(0)]
|
||||
[InlineData(1)]
|
||||
[InlineData(2)]
|
||||
[InlineData(3)]
|
||||
[InlineData(4)]
|
||||
[InlineData(5)]
|
||||
[InlineData(6)]
|
||||
public static void Decrypt(int index)
|
||||
{
|
||||
TestData data = TestVectors[index];
|
||||
byte[] decrypted = data.CipherText.ToArray();
|
||||
var transform = new Aes128XtsTransform(data.Key1, data.Key2, true);
|
||||
|
||||
transform.TransformBlock(decrypted, 0, decrypted.Length, data.Sector);
|
||||
Assert.Equal(data.PlainText, decrypted);
|
||||
}
|
||||
|
||||
private struct TestData
|
||||
{
|
||||
public byte[] CipherText;
|
||||
public byte[] PlainText;
|
||||
public byte[] Key1;
|
||||
public byte[] Key2;
|
||||
public ulong Sector;
|
||||
}
|
||||
}
|
||||
}
|
19
LibHac.Tests/LibHac.Tests.csproj
Normal file
19
LibHac.Tests/LibHac.Tests.csproj
Normal file
@ -0,0 +1,19 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">
|
||||
|
||||
<PropertyGroup>
|
||||
<TargetFramework>netcoreapp2.1</TargetFramework>
|
||||
|
||||
<IsPackable>false</IsPackable>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.8.0" />
|
||||
<PackageReference Include="xunit" Version="2.3.1" />
|
||||
<PackageReference Include="xunit.runner.visualstudio" Version="2.3.1" />
|
||||
</ItemGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<ProjectReference Include="..\LibHac\LibHac.csproj" />
|
||||
</ItemGroup>
|
||||
|
||||
</Project>
|
@ -13,6 +13,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NandReader", "NandReader\Na
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NandReaderGui", "NandReaderGui\NandReaderGui.csproj", "{3CBD38B0-6575-4768-8E94-A8AF2D2C9F43}"
|
||||
EndProject
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LibHac.Tests", "LibHac.Tests\LibHac.Tests.csproj", "{679C89BD-5FDF-4CC2-9129-ABABD759035B}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|Any CPU = Debug|Any CPU
|
||||
@ -37,6 +39,10 @@ Global
|
||||
{9889C467-284F-4061-B4DB-EC94051C29C0}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
{3CBD38B0-6575-4768-8E94-A8AF2D2C9F43}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{3CBD38B0-6575-4768-8E94-A8AF2D2C9F43}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{679C89BD-5FDF-4CC2-9129-ABABD759035B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
|
||||
{679C89BD-5FDF-4CC2-9129-ABABD759035B}.Debug|Any CPU.Build.0 = Debug|Any CPU
|
||||
{679C89BD-5FDF-4CC2-9129-ABABD759035B}.Release|Any CPU.ActiveCfg = Release|Any CPU
|
||||
{679C89BD-5FDF-4CC2-9129-ABABD759035B}.Release|Any CPU.Build.0 = Release|Any CPU
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
|
@ -1,161 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class Aes128CtrStream : SectorStream
|
||||
{
|
||||
private const int CryptChunkSize = 0x4000;
|
||||
private const int BlockSize = 0x10;
|
||||
|
||||
private readonly long _counterOffset;
|
||||
private readonly byte[] _tempBuffer;
|
||||
private readonly Aes128CtrTransform _decryptor;
|
||||
protected readonly byte[] Counter;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="key">The decryption key</param>
|
||||
/// <param name="counter">The initial counter</param>
|
||||
public Aes128CtrStream(Stream baseStream, byte[] key, byte[] counter)
|
||||
: this(baseStream, key, 0, baseStream.Length, counter) { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="key">The decryption key</param>
|
||||
/// <param name="counterOffset">Offset to add to the counter</param>
|
||||
/// <param name="ctrHi">The value of the upper 64 bits of the counter</param>
|
||||
public Aes128CtrStream(Stream baseStream, byte[] key, long counterOffset = 0, byte[] ctrHi = null)
|
||||
: this(baseStream, key, 0, baseStream.Length, counterOffset, ctrHi) { }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="key">The decryption key</param>
|
||||
/// <param name="offset">Offset to start at in the input stream</param>
|
||||
/// <param name="length">The length of the created stream</param>
|
||||
/// <param name="counter">The initial counter</param>
|
||||
public Aes128CtrStream(Stream baseStream, byte[] key, long offset, long length, byte[] counter)
|
||||
: base(baseStream, BlockSize, 1, offset)
|
||||
{
|
||||
_counterOffset = 0;
|
||||
|
||||
// Make the stream seekable by remembering the initial counter value
|
||||
if (counter != null)
|
||||
{
|
||||
for (int i = 0; i < 8; i++)
|
||||
{
|
||||
_counterOffset |= (long)counter[0xF - i] << (4 + i * 8);
|
||||
}
|
||||
}
|
||||
|
||||
Length = length;
|
||||
_tempBuffer = new byte[CryptChunkSize];
|
||||
|
||||
_decryptor = new Aes128CtrTransform(key, counter ?? new byte[0x10], CryptChunkSize);
|
||||
Counter = _decryptor.Counter;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="key">The decryption key</param>
|
||||
/// <param name="offset">Offset to start at in the input stream</param>
|
||||
/// <param name="length">The length of the created stream</param>
|
||||
/// <param name="counterOffset">Offset to add to the counter</param>
|
||||
/// <param name="ctrHi">The value of the upper 64 bits of the counter</param>
|
||||
public Aes128CtrStream(Stream baseStream, byte[] key, long offset, long length, long counterOffset, byte[] ctrHi = null)
|
||||
: base(baseStream, BlockSize, CryptChunkSize / BlockSize, offset)
|
||||
{
|
||||
var initialCounter = new byte[BlockSize];
|
||||
if (ctrHi != null)
|
||||
{
|
||||
Array.Copy(ctrHi, initialCounter, 8);
|
||||
}
|
||||
|
||||
_counterOffset = counterOffset;
|
||||
Length = length;
|
||||
_tempBuffer = new byte[CryptChunkSize];
|
||||
|
||||
_decryptor = new Aes128CtrTransform(key, initialCounter, CryptChunkSize);
|
||||
Counter = _decryptor.Counter;
|
||||
UpdateCounter(_counterOffset + base.Position);
|
||||
|
||||
baseStream.Position = offset;
|
||||
}
|
||||
|
||||
private void UpdateCounter(long offset)
|
||||
{
|
||||
ulong off = (ulong)offset >> 4;
|
||||
for (uint j = 0; j < 0x7; j++)
|
||||
{
|
||||
Counter[0x10 - j - 1] = (byte)(off & 0xFF);
|
||||
off >>= 8;
|
||||
}
|
||||
|
||||
// Because the value stored in the counter is offset >> 4, the top 4 bits
|
||||
// of byte 8 need to have their original value preserved
|
||||
Counter[8] = (byte)((Counter[8] & 0xF0) | (int)(off & 0x0F));
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => true;
|
||||
public override bool CanWrite => false;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => base.Position;
|
||||
set
|
||||
{
|
||||
base.Position = value;
|
||||
UpdateCounter(_counterOffset + base.Position);
|
||||
}
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
ValidateSize(count);
|
||||
|
||||
int bytesRead = base.Read(_tempBuffer, 0, count);
|
||||
if (bytesRead == 0) return 0;
|
||||
|
||||
return _decryptor.TransformBlock(_tempBuffer, 0, bytesRead, buffer, offset);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,90 +0,0 @@
|
||||
using System;
|
||||
using System.Numerics;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class Aes128CtrTransform
|
||||
{
|
||||
private const int BlockSize = 128;
|
||||
private const int BlockSizeBytes = BlockSize / 8;
|
||||
|
||||
private readonly int _maxSize;
|
||||
public readonly byte[] Counter = new byte[BlockSizeBytes];
|
||||
private readonly byte[] _counterDec;
|
||||
private readonly byte[] _counterEnc;
|
||||
private readonly ICryptoTransform _encryptor;
|
||||
|
||||
public Aes128CtrTransform(byte[] key, byte[] counter, int maxTransformSize)
|
||||
{
|
||||
if (key == null) throw new ArgumentNullException(nameof(key));
|
||||
if (counter == null) throw new ArgumentNullException(nameof(counter));
|
||||
if (key.Length != BlockSizeBytes)
|
||||
throw new ArgumentException($"{nameof(key)} must be {BlockSizeBytes} bytes long");
|
||||
if (counter.Length != BlockSizeBytes)
|
||||
throw new ArgumentException($"{nameof(counter)} must be {BlockSizeBytes} bytes long");
|
||||
|
||||
Aes aes = Aes.Create();
|
||||
if (aes == null) throw new CryptographicException("Unable to create AES object");
|
||||
aes.Mode = CipherMode.ECB;
|
||||
aes.Padding = PaddingMode.None;
|
||||
|
||||
_encryptor = aes.CreateEncryptor(key, new byte[BlockSizeBytes]);
|
||||
_maxSize = maxTransformSize;
|
||||
_counterDec = new byte[_maxSize];
|
||||
_counterEnc = new byte[_maxSize];
|
||||
|
||||
Array.Copy(counter, Counter, BlockSizeBytes);
|
||||
}
|
||||
|
||||
public int TransformBlock(byte[] inputBuffer, int inputOffset, int inputCount, byte[] outputBuffer, int outputOffset)
|
||||
{
|
||||
if (inputCount > _maxSize)
|
||||
throw new ArgumentException($"{nameof(inputCount)} cannot be greater than {_maxSize}");
|
||||
|
||||
int blockCount = Util.DivideByRoundUp(inputCount, BlockSizeBytes);
|
||||
|
||||
FillDecryptedCounter(blockCount);
|
||||
|
||||
_encryptor.TransformBlock(_counterDec, 0, blockCount * BlockSizeBytes, _counterEnc, 0);
|
||||
XorArrays(inputBuffer, inputOffset, outputBuffer, outputOffset, _counterEnc, inputCount);
|
||||
|
||||
return inputCount;
|
||||
}
|
||||
|
||||
private void FillDecryptedCounter(int blockCount)
|
||||
{
|
||||
for (int i = 0; i < blockCount; i++)
|
||||
{
|
||||
Array.Copy(Counter, 0, _counterDec, i * BlockSizeBytes, BlockSizeBytes);
|
||||
IncrementCounter();
|
||||
}
|
||||
}
|
||||
|
||||
private void IncrementCounter()
|
||||
{
|
||||
Util.IncrementByteArray(Counter);
|
||||
}
|
||||
|
||||
private void XorArrays(byte[] inputBuffer, int inputOffset, byte[] outputBuffer, int outputOffset, byte[] xor, int length)
|
||||
{
|
||||
int i = 0;
|
||||
if (Vector.IsHardwareAccelerated)
|
||||
{
|
||||
int simdEnd = Math.Max(length - Vector<byte>.Count, 0);
|
||||
for (; i < simdEnd; i += Vector<byte>.Count)
|
||||
{
|
||||
var inputVec = new Vector<byte>(inputBuffer, inputOffset + i);
|
||||
var xorVec = new Vector<byte>(xor, i);
|
||||
Vector<byte> outputVec = inputVec ^ xorVec;
|
||||
outputVec.CopyTo(outputBuffer, outputOffset + i);
|
||||
}
|
||||
}
|
||||
|
||||
for (; i < length; i++)
|
||||
{
|
||||
outputBuffer[outputOffset + i] = (byte)(inputBuffer[inputOffset + i] ^ xor[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
172
LibHac/Bktr.cs
172
LibHac/Bktr.cs
@ -1,172 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class Bktr : Stream
|
||||
{
|
||||
private long _position;
|
||||
public RelocationBlock RelocationBlock { get; }
|
||||
private List<RelocationEntry> RelocationEntries { get; } = new List<RelocationEntry>();
|
||||
private List<long> RelocationOffsets { get; }
|
||||
|
||||
private Stream Patch { get; }
|
||||
private Stream Base { get; }
|
||||
private RelocationEntry CurrentEntry { get; set; }
|
||||
|
||||
public Bktr(Stream patchRomfs, Stream baseRomfs, NcaSection section)
|
||||
{
|
||||
if (section.Header.EncryptionType != NcaEncryptionType.AesCtrEx) throw new ArgumentException("Section is not of type BKTR");
|
||||
Patch = patchRomfs ?? throw new NullReferenceException($"{nameof(patchRomfs)} cannot be null");
|
||||
Base = baseRomfs ?? throw new NullReferenceException($"{nameof(baseRomfs)} cannot be null");
|
||||
|
||||
IvfcLevelHeader level5 = section.Header.IvfcInfo.LevelHeaders[5];
|
||||
Length = level5.LogicalOffset + level5.HashDataSize;
|
||||
|
||||
using (var reader = new BinaryReader(patchRomfs, Encoding.Default, true))
|
||||
{
|
||||
patchRomfs.Position = section.Header.BktrInfo.RelocationHeader.Offset;
|
||||
RelocationBlock = new RelocationBlock(reader);
|
||||
}
|
||||
|
||||
foreach (RelocationBucket bucket in RelocationBlock.Buckets)
|
||||
{
|
||||
RelocationEntries.AddRange(bucket.Entries);
|
||||
}
|
||||
|
||||
for (int i = 0; i < RelocationEntries.Count - 1; i++)
|
||||
{
|
||||
RelocationEntries[i].Next = RelocationEntries[i + 1];
|
||||
RelocationEntries[i].VirtOffsetEnd = RelocationEntries[i + 1].VirtOffset;
|
||||
}
|
||||
|
||||
RelocationEntries[RelocationEntries.Count - 1].VirtOffsetEnd = level5.LogicalOffset + level5.HashDataSize;
|
||||
RelocationOffsets = RelocationEntries.Select(x => x.VirtOffset).ToList();
|
||||
|
||||
CurrentEntry = GetRelocationEntry(0);
|
||||
UpdateSourceStreamPositions();
|
||||
}
|
||||
|
||||
private RelocationEntry GetRelocationEntry(long offset)
|
||||
{
|
||||
int index = RelocationOffsets.BinarySearch(offset);
|
||||
if (index < 0) index = ~index - 1;
|
||||
return RelocationEntries[index];
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Length - Position;
|
||||
if (remaining <= 0) return 0;
|
||||
if (remaining < count) count = (int)remaining;
|
||||
|
||||
int toOutput = count;
|
||||
int pos = 0;
|
||||
|
||||
while (toOutput > 0)
|
||||
{
|
||||
long remainInEntry = CurrentEntry.VirtOffsetEnd - Position;
|
||||
int toRead = (int)Math.Min(toOutput, remainInEntry);
|
||||
ReadCurrent(buffer, pos, toRead);
|
||||
pos += toRead;
|
||||
toOutput -= toRead;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
private void ReadCurrent(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (CurrentEntry.IsPatch)
|
||||
{
|
||||
Patch.Read(buffer, offset, count);
|
||||
}
|
||||
else
|
||||
{
|
||||
Base.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
Position += count;
|
||||
}
|
||||
|
||||
private void UpdateSourceStreamPositions()
|
||||
{
|
||||
// At end of virtual stream
|
||||
if (CurrentEntry == null) return;
|
||||
|
||||
long entryOffset = Position - CurrentEntry.VirtOffset;
|
||||
|
||||
if (CurrentEntry.IsPatch)
|
||||
{
|
||||
Patch.Position = CurrentEntry.PhysOffset + entryOffset;
|
||||
}
|
||||
else
|
||||
{
|
||||
Base.Position = CurrentEntry.PhysOffset + entryOffset;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
if (value > Length) throw new IndexOutOfRangeException();
|
||||
|
||||
// Avoid doing a search when reading sequentially
|
||||
if (CurrentEntry != null && value == CurrentEntry.VirtOffsetEnd)
|
||||
{
|
||||
CurrentEntry = CurrentEntry.Next;
|
||||
}
|
||||
else if (CurrentEntry == null || value < CurrentEntry.VirtOffset || value > CurrentEntry.VirtOffsetEnd)
|
||||
{
|
||||
CurrentEntry = GetRelocationEntry(value);
|
||||
}
|
||||
|
||||
_position = value;
|
||||
UpdateSourceStreamPositions();
|
||||
}
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanWrite => false;
|
||||
public override long Length { get; }
|
||||
public override bool CanSeek => true;
|
||||
}
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class BktrCryptoStream : Aes128CtrStream
|
||||
{
|
||||
public AesSubsectionBlock AesSubsectionBlock { get; }
|
||||
private List<AesSubsectionEntry> SubsectionEntries { get; } = new List<AesSubsectionEntry>();
|
||||
private List<long> SubsectionOffsets { get; }
|
||||
private AesSubsectionEntry CurrentEntry { get; set; }
|
||||
|
||||
public BktrCryptoStream(Stream baseStream, byte[] key, long offset, long length, long counterOffset, byte[] ctrHi, BktrPatchInfo bktr)
|
||||
: base(baseStream, key, offset, length, counterOffset, ctrHi)
|
||||
{
|
||||
BktrHeader header = bktr.EncryptionHeader;
|
||||
byte[] subsectionBytes;
|
||||
using (var streamDec = new RandomAccessSectorStream(new Aes128CtrStream(baseStream, key, offset, length, counterOffset, ctrHi)))
|
||||
{
|
||||
streamDec.Position = header.Offset;
|
||||
subsectionBytes = new byte[header.Size];
|
||||
streamDec.Read(subsectionBytes, 0, subsectionBytes.Length);
|
||||
}
|
||||
|
||||
using (var reader = new BinaryReader(new MemoryStream(subsectionBytes)))
|
||||
{
|
||||
AesSubsectionBlock = new AesSubsectionBlock(reader);
|
||||
}
|
||||
|
||||
foreach (AesSubsectionBucket bucket in AesSubsectionBlock.Buckets)
|
||||
{
|
||||
SubsectionEntries.AddRange(bucket.Entries);
|
||||
}
|
||||
|
||||
// Add a subsection for the BKTR headers to make things easier
|
||||
var headerSubsection = new AesSubsectionEntry
|
||||
{
|
||||
Offset = bktr.RelocationHeader.Offset,
|
||||
Counter = (uint)(ctrHi[4] << 24 | ctrHi[5] << 16 | ctrHi[6] << 8 | ctrHi[7]),
|
||||
OffsetEnd = long.MaxValue
|
||||
};
|
||||
SubsectionEntries.Add(headerSubsection);
|
||||
|
||||
for (int i = 0; i < SubsectionEntries.Count - 1; i++)
|
||||
{
|
||||
SubsectionEntries[i].Next = SubsectionEntries[i + 1];
|
||||
SubsectionEntries[i].OffsetEnd = SubsectionEntries[i + 1].Offset;
|
||||
}
|
||||
|
||||
SubsectionOffsets = SubsectionEntries.Select(x => x.Offset).ToList();
|
||||
|
||||
CurrentEntry = GetSubsectionEntry(0);
|
||||
UpdateCounterSubsection(CurrentEntry.Counter);
|
||||
baseStream.Position = offset;
|
||||
}
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => base.Position;
|
||||
set
|
||||
{
|
||||
base.Position = value;
|
||||
CurrentEntry = GetSubsectionEntry(value);
|
||||
UpdateCounterSubsection(CurrentEntry.Counter);
|
||||
}
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int totalBytesRead = 0;
|
||||
int outPos = offset;
|
||||
|
||||
while (count > 0)
|
||||
{
|
||||
int bytesToRead = (int)Math.Min(CurrentEntry.OffsetEnd - Position, count);
|
||||
int bytesRead = base.Read(buffer, outPos, bytesToRead);
|
||||
|
||||
outPos += bytesRead;
|
||||
totalBytesRead += bytesRead;
|
||||
count -= bytesRead;
|
||||
|
||||
if (Position >= CurrentEntry.OffsetEnd)
|
||||
{
|
||||
CurrentEntry = CurrentEntry.Next;
|
||||
UpdateCounterSubsection(CurrentEntry.Counter);
|
||||
}
|
||||
else if (bytesRead == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
private AesSubsectionEntry GetSubsectionEntry(long offset)
|
||||
{
|
||||
int index = SubsectionOffsets.BinarySearch(offset);
|
||||
if (index < 0) index = ~index - 1;
|
||||
return SubsectionEntries[index];
|
||||
}
|
||||
|
||||
private void UpdateCounterSubsection(uint value)
|
||||
{
|
||||
Counter[7] = (byte)value;
|
||||
Counter[6] = (byte)(value >> 8);
|
||||
Counter[5] = (byte)(value >> 16);
|
||||
Counter[4] = (byte)(value >> 24);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,152 +0,0 @@
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class RelocationBlock
|
||||
{
|
||||
public uint Field0;
|
||||
public int BucketCount;
|
||||
public long Size;
|
||||
public long[] BaseOffsets;
|
||||
public RelocationBucket[] Buckets;
|
||||
|
||||
public RelocationBlock(BinaryReader reader)
|
||||
{
|
||||
long start = reader.BaseStream.Position;
|
||||
|
||||
Field0 = reader.ReadUInt32();
|
||||
BucketCount = reader.ReadInt32();
|
||||
Size = reader.ReadInt64();
|
||||
BaseOffsets = new long[BucketCount];
|
||||
Buckets = new RelocationBucket[BucketCount];
|
||||
|
||||
for (int i = 0; i < BucketCount; i++)
|
||||
{
|
||||
BaseOffsets[i] = reader.ReadInt64();
|
||||
}
|
||||
|
||||
reader.BaseStream.Position = start + 0x4000;
|
||||
|
||||
for (int i = 0; i < BucketCount; i++)
|
||||
{
|
||||
Buckets[i] = new RelocationBucket(reader);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class RelocationBucket
|
||||
{
|
||||
public int BucketNum;
|
||||
public int EntryCount;
|
||||
public long VirtualOffsetEnd;
|
||||
public RelocationEntry[] Entries;
|
||||
|
||||
public RelocationBucket(BinaryReader reader)
|
||||
{
|
||||
long start = reader.BaseStream.Position;
|
||||
|
||||
BucketNum = reader.ReadInt32();
|
||||
EntryCount = reader.ReadInt32();
|
||||
VirtualOffsetEnd = reader.ReadInt64();
|
||||
Entries = new RelocationEntry[EntryCount];
|
||||
|
||||
for (int i = 0; i < EntryCount; i++)
|
||||
{
|
||||
Entries[i] = new RelocationEntry(reader);
|
||||
}
|
||||
|
||||
reader.BaseStream.Position = start + 0x4000;
|
||||
}
|
||||
}
|
||||
|
||||
public class RelocationEntry
|
||||
{
|
||||
public long VirtOffset;
|
||||
public long VirtOffsetEnd;
|
||||
public long PhysOffset;
|
||||
public bool IsPatch;
|
||||
public RelocationEntry Next;
|
||||
|
||||
public RelocationEntry(BinaryReader reader)
|
||||
{
|
||||
VirtOffset = reader.ReadInt64();
|
||||
PhysOffset = reader.ReadInt64();
|
||||
IsPatch = reader.ReadInt32() != 0;
|
||||
}
|
||||
}
|
||||
|
||||
public class AesSubsectionBlock
|
||||
{
|
||||
public uint Field0;
|
||||
public int BucketCount;
|
||||
public long Size;
|
||||
public long[] BaseOffsets;
|
||||
public AesSubsectionBucket[] Buckets;
|
||||
|
||||
public AesSubsectionBlock(BinaryReader reader)
|
||||
{
|
||||
long start = reader.BaseStream.Position;
|
||||
|
||||
Field0 = reader.ReadUInt32();
|
||||
BucketCount = reader.ReadInt32();
|
||||
Size = reader.ReadInt64();
|
||||
BaseOffsets = new long[BucketCount];
|
||||
Buckets = new AesSubsectionBucket[BucketCount];
|
||||
|
||||
for (int i = 0; i < BucketCount; i++)
|
||||
{
|
||||
BaseOffsets[i] = reader.ReadInt64();
|
||||
}
|
||||
|
||||
reader.BaseStream.Position = start + 0x4000;
|
||||
|
||||
for (int i = 0; i < BucketCount; i++)
|
||||
{
|
||||
Buckets[i] = new AesSubsectionBucket(reader);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class AesSubsectionBucket
|
||||
{
|
||||
public int BucketNum;
|
||||
public int EntryCount;
|
||||
public long VirtualOffsetEnd;
|
||||
public AesSubsectionEntry[] Entries;
|
||||
public AesSubsectionBucket(BinaryReader reader)
|
||||
{
|
||||
long start = reader.BaseStream.Position;
|
||||
|
||||
BucketNum = reader.ReadInt32();
|
||||
EntryCount = reader.ReadInt32();
|
||||
VirtualOffsetEnd = reader.ReadInt64();
|
||||
Entries = new AesSubsectionEntry[EntryCount];
|
||||
|
||||
for (int i = 0; i < EntryCount; i++)
|
||||
{
|
||||
Entries[i] = new AesSubsectionEntry(reader);
|
||||
}
|
||||
|
||||
reader.BaseStream.Position = start + 0x4000;
|
||||
}
|
||||
}
|
||||
|
||||
public class AesSubsectionEntry
|
||||
{
|
||||
public long Offset;
|
||||
public uint Field8;
|
||||
public uint Counter;
|
||||
|
||||
public AesSubsectionEntry Next;
|
||||
public long OffsetEnd;
|
||||
|
||||
public AesSubsectionEntry() { }
|
||||
|
||||
public AesSubsectionEntry(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
Field8 = reader.ReadUInt32();
|
||||
Counter = reader.ReadUInt32();
|
||||
}
|
||||
}
|
||||
}
|
@ -2,7 +2,7 @@
|
||||
using System.IO;
|
||||
using System.Numerics;
|
||||
using System.Security.Cryptography;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -110,9 +110,9 @@ namespace LibHac
|
||||
Array.Copy(encryptedKey, 0x10, body, 0, 0x230);
|
||||
var dec = new byte[0x230];
|
||||
|
||||
using (var streamDec = new RandomAccessSectorStream(new Aes128CtrStream(new MemoryStream(body), kek, counter)))
|
||||
using (var storageDec = new Aes128CtrStorage(new MemoryStorage(body), kek, counter, false))
|
||||
{
|
||||
streamDec.Read(dec, 0, dec.Length);
|
||||
storageDec.Read(dec, 0);
|
||||
}
|
||||
|
||||
var d = new byte[0x100];
|
||||
@ -146,22 +146,31 @@ namespace LibHac
|
||||
}
|
||||
}
|
||||
|
||||
public static bool Rsa2048Pkcs1Verify(byte[] data, byte[] signature, byte[] modulus)
|
||||
public static Validity Rsa2048Pkcs1Verify(byte[] data, byte[] signature, byte[] modulus)
|
||||
{
|
||||
byte[] hash;
|
||||
using (SHA256 sha256 = SHA256.Create())
|
||||
using (RSA rsa = RSA.Create())
|
||||
{
|
||||
hash = sha256.ComputeHash(data);
|
||||
rsa.ImportParameters(new RSAParameters { Exponent = new byte[] { 1, 0, 1 }, Modulus = modulus });
|
||||
|
||||
return rsa.VerifyData(data, signature, HashAlgorithmName.SHA256, RSASignaturePadding.Pkcs1)
|
||||
? Validity.Valid
|
||||
: Validity.Invalid;
|
||||
}
|
||||
}
|
||||
|
||||
using (var rsa = new RSACryptoServiceProvider())
|
||||
public static Validity Rsa2048PssVerify(byte[] data, byte[] signature, byte[] modulus)
|
||||
{
|
||||
#if USE_RSA_CNG
|
||||
using (RSA rsa = new RSACng())
|
||||
#else
|
||||
using (RSA rsa = RSA.Create())
|
||||
#endif
|
||||
{
|
||||
rsa.ImportParameters(new RSAParameters() { Exponent = new byte[] { 1, 0, 1 }, Modulus = modulus });
|
||||
rsa.ImportParameters(new RSAParameters { Exponent = new byte[] { 1, 0, 1 }, Modulus = modulus });
|
||||
|
||||
var rsaFormatter = new RSAPKCS1SignatureDeformatter(rsa);
|
||||
rsaFormatter.SetHashAlgorithm("SHA256");
|
||||
|
||||
return rsaFormatter.VerifySignature(hash, signature);
|
||||
return rsa.VerifyData(data, signature, HashAlgorithmName.SHA256, RSASignaturePadding.Pss)
|
||||
? Validity.Valid
|
||||
: Validity.Invalid;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,161 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class HierarchicalIntegrityVerificationStream : Stream
|
||||
{
|
||||
public Stream[] Levels { get; }
|
||||
public Stream DataLevel { get; }
|
||||
public IntegrityCheckLevel IntegrityCheckLevel { get; }
|
||||
|
||||
/// <summary>
|
||||
/// An array of the hash statuses of every block in each level.
|
||||
/// </summary>
|
||||
public Validity[][] LevelValidities { get; }
|
||||
|
||||
private IntegrityVerificationStream[] IntegrityStreams { get; }
|
||||
|
||||
public HierarchicalIntegrityVerificationStream(IntegrityVerificationInfo[] levelInfo, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
Levels = new Stream[levelInfo.Length];
|
||||
IntegrityCheckLevel = integrityCheckLevel;
|
||||
LevelValidities = new Validity[levelInfo.Length - 1][];
|
||||
IntegrityStreams = new IntegrityVerificationStream[levelInfo.Length - 1];
|
||||
|
||||
Levels[0] = levelInfo[0].Data;
|
||||
|
||||
for (int i = 1; i < Levels.Length; i++)
|
||||
{
|
||||
var levelData = new IntegrityVerificationStream(levelInfo[i], Levels[i - 1], integrityCheckLevel);
|
||||
|
||||
Levels[i] = new RandomAccessSectorStream(levelData);
|
||||
LevelValidities[i - 1] = levelData.BlockValidities;
|
||||
IntegrityStreams[i - 1] = levelData;
|
||||
}
|
||||
|
||||
DataLevel = Levels[Levels.Length - 1];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Checks the hashes of any unchecked blocks and returns the <see cref="Validity"/> of the data.
|
||||
/// </summary>
|
||||
/// <param name="returnOnError">If <see langword="true"/>, return as soon as an invalid block is found.</param>
|
||||
/// <param name="logger">An optional <see cref="IProgressReport"/> for reporting progress.</param>
|
||||
/// <returns>The <see cref="Validity"/> of the data of the specified hash level.</returns>
|
||||
public Validity Validate(bool returnOnError, IProgressReport logger = null)
|
||||
{
|
||||
Validity[] validities = LevelValidities[LevelValidities.Length - 1];
|
||||
IntegrityVerificationStream stream = IntegrityStreams[IntegrityStreams.Length - 1];
|
||||
|
||||
// Restore the original position of the stream when we're done validating
|
||||
long initialPosition = stream.Position;
|
||||
|
||||
long blockSize = stream.SectorSize;
|
||||
int blockCount = (int)Util.DivideByRoundUp(Length, blockSize);
|
||||
|
||||
var buffer = new byte[blockSize];
|
||||
var result = Validity.Valid;
|
||||
|
||||
logger?.SetTotal(blockCount);
|
||||
|
||||
for (int i = 0; i < blockCount; i++)
|
||||
{
|
||||
if (validities[i] == Validity.Unchecked)
|
||||
{
|
||||
stream.Position = blockSize * i;
|
||||
stream.Read(buffer, 0, buffer.Length, IntegrityCheckLevel.IgnoreOnInvalid);
|
||||
}
|
||||
|
||||
if (validities[i] == Validity.Invalid)
|
||||
{
|
||||
result = Validity.Invalid;
|
||||
if (returnOnError) break;
|
||||
}
|
||||
|
||||
logger?.ReportAdd(1);
|
||||
}
|
||||
|
||||
logger?.SetTotal(0);
|
||||
stream.Position = initialPosition;
|
||||
return result;
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
DataLevel.Flush();
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return DataLevel.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
DataLevel.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override bool CanRead => DataLevel.CanRead;
|
||||
public override bool CanSeek => DataLevel.CanSeek;
|
||||
public override bool CanWrite => false;
|
||||
public override long Length => DataLevel.Length;
|
||||
public override long Position
|
||||
{
|
||||
get => DataLevel.Position;
|
||||
set => DataLevel.Position = value;
|
||||
}
|
||||
}
|
||||
|
||||
public static class HierarchicalIntegrityVerificationStreamExtensions
|
||||
{
|
||||
internal static void SetLevelValidities(this HierarchicalIntegrityVerificationStream stream, IvfcHeader header)
|
||||
{
|
||||
for (int i = 0; i < stream.Levels.Length - 1; i++)
|
||||
{
|
||||
Validity[] level = stream.LevelValidities[i];
|
||||
var levelValidity = Validity.Valid;
|
||||
|
||||
foreach (Validity block in level)
|
||||
{
|
||||
if (block == Validity.Invalid)
|
||||
{
|
||||
levelValidity = Validity.Invalid;
|
||||
break;
|
||||
}
|
||||
|
||||
if (block == Validity.Unchecked && levelValidity != Validity.Invalid)
|
||||
{
|
||||
levelValidity = Validity.Unchecked;
|
||||
}
|
||||
}
|
||||
|
||||
header.LevelHeaders[i].HashValidity = levelValidity;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
80
LibHac/IO/Aes128CtrExStorage.cs
Normal file
80
LibHac/IO/Aes128CtrExStorage.cs
Normal file
@ -0,0 +1,80 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class Aes128CtrExStorage : Aes128CtrStorage
|
||||
{
|
||||
private List<AesSubsectionEntry> SubsectionEntries { get; }
|
||||
private List<long> SubsectionOffsets { get; }
|
||||
private BucketTree<AesSubsectionEntry> BucketTree { get; }
|
||||
|
||||
private readonly object _locker = new object();
|
||||
|
||||
public Aes128CtrExStorage(IStorage baseStorage, IStorage bucketTreeHeader, IStorage bucketTreeData, byte[] key, long counterOffset, byte[] ctrHi, bool leaveOpen)
|
||||
: base(baseStorage, key, counterOffset, ctrHi, leaveOpen)
|
||||
{
|
||||
BucketTree = new BucketTree<AesSubsectionEntry>(bucketTreeHeader, bucketTreeData);
|
||||
|
||||
SubsectionEntries = BucketTree.GetEntryList();
|
||||
SubsectionOffsets = SubsectionEntries.Select(x => x.Offset).ToList();
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
AesSubsectionEntry entry = GetSubsectionEntry(offset);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int bytesToRead = (int)Math.Min(entry.OffsetEnd - inPos, remaining);
|
||||
|
||||
lock (_locker)
|
||||
{
|
||||
UpdateCounterSubsection(entry.Counter);
|
||||
base.ReadImpl(destination.Slice(outPos, bytesToRead), inPos);
|
||||
}
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
|
||||
if (remaining != 0 && inPos >= entry.OffsetEnd)
|
||||
{
|
||||
entry = entry.Next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
private AesSubsectionEntry GetSubsectionEntry(long offset)
|
||||
{
|
||||
int index = SubsectionOffsets.BinarySearch(offset);
|
||||
if (index < 0) index = ~index - 1;
|
||||
return SubsectionEntries[index];
|
||||
}
|
||||
|
||||
private void UpdateCounterSubsection(uint value)
|
||||
{
|
||||
Counter[7] = (byte)value;
|
||||
Counter[6] = (byte)(value >> 8);
|
||||
Counter[5] = (byte)(value >> 16);
|
||||
Counter[4] = (byte)(value >> 24);
|
||||
}
|
||||
}
|
||||
}
|
107
LibHac/IO/Aes128CtrStorage.cs
Normal file
107
LibHac/IO/Aes128CtrStorage.cs
Normal file
@ -0,0 +1,107 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class Aes128CtrStorage : SectorStorage
{
    private const int BlockSize = 0x10;

    // Base offset added to the requested storage offset before the counter
    // is derived from it.
    private readonly long _counterOffset;
    private readonly Aes128CtrTransform _decryptor;

    protected readonly byte[] Counter;

    // The counter update and the transform must happen as one atomic step
    // because all reads/writes share a single transform instance.
    private readonly object _locker = new object();

    /// <summary>
    /// Creates a new AES-CTR storage from an explicit 16-byte counter.
    /// </summary>
    /// <param name="baseStorage">The encrypted input <see cref="IStorage"/>.</param>
    /// <param name="key">The decryption key. Must be 16 bytes long.</param>
    /// <param name="counter">The initial counter. Must be 16 bytes long.</param>
    /// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="Aes128CtrStorage"/> object is disposed; otherwise, <see langword="false"/>.</param>
    /// <exception cref="ArgumentNullException"><paramref name="key"/> or <paramref name="counter"/> is null.</exception>
    /// <exception cref="ArgumentException">A key or counter has the wrong length.</exception>
    public Aes128CtrStorage(IStorage baseStorage, byte[] key, byte[] counter, bool leaveOpen)
        : base(baseStorage, BlockSize, leaveOpen)
    {
        // Fixed: throw ArgumentNullException instead of NullReferenceException,
        // and pass ArgumentException arguments in (message, paramName) order —
        // they were previously swapped, producing a nonsensical message.
        if (key == null) throw new ArgumentNullException(nameof(key));
        if (key.Length != BlockSize) throw new ArgumentException($"Key must be {BlockSize} bytes long", nameof(key));
        if (counter == null) throw new ArgumentNullException(nameof(counter));
        if (counter.Length != BlockSize) throw new ArgumentException($"Counter must be {BlockSize} bytes long", nameof(counter));

        // Make the storage seekable by recovering the byte offset encoded in
        // the low 8 counter bytes. Those bytes hold (offset >> 4), so byte i
        // contributes its value shifted left by 4 + i * 8.
        for (int i = 0; i < 8; i++)
        {
            _counterOffset |= (long)counter[0xF - i] << (4 + i * 8);
        }

        _decryptor = new Aes128CtrTransform(key, counter);
        Counter = _decryptor.Counter;
    }

    /// <summary>
    /// Creates a new AES storage
    /// </summary>
    /// <param name="baseStorage">The input <see cref="IStorage"/>.</param>
    /// <param name="key">The decryption key.</param>
    /// <param name="counterOffset">Offset to add to the counter.</param>
    /// <param name="counterHi">The value of the upper 64 bits of the counter. Can be null.</param>
    /// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="Aes128CtrStorage"/> object is disposed; otherwise, <see langword="false"/>.</param>
    /// <exception cref="ArgumentNullException"><paramref name="key"/> is null.</exception>
    /// <exception cref="ArgumentException"><paramref name="key"/> has the wrong length.</exception>
    public Aes128CtrStorage(IStorage baseStorage, byte[] key, long counterOffset, byte[] counterHi, bool leaveOpen)
        : base(baseStorage, BlockSize, leaveOpen)
    {
        if (key == null) throw new ArgumentNullException(nameof(key));
        if (key.Length != BlockSize) throw new ArgumentException($"Key must be {BlockSize} bytes long", nameof(key));

        var initialCounter = new byte[BlockSize];
        if (counterHi != null)
        {
            // Only the upper 8 bytes are taken; the lower 8 are rewritten per
            // operation from the storage offset in UpdateCounter.
            Array.Copy(counterHi, initialCounter, 8);
        }

        _counterOffset = counterOffset;

        _decryptor = new Aes128CtrTransform(key, initialCounter);
        Counter = _decryptor.Counter;
    }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
        // Read the ciphertext, then decrypt it in place.
        base.ReadImpl(destination, offset);

        lock (_locker)
        {
            UpdateCounter(_counterOffset + offset);
            _decryptor.TransformBlock(destination);
        }
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
        // Encrypt into a rented scratch buffer so the caller's data is untouched.
        byte[] encrypted = ArrayPool<byte>.Shared.Rent(source.Length);
        try
        {
            Span<byte> encryptedSpan = encrypted.AsSpan(0, source.Length);
            source.CopyTo(encryptedSpan);

            lock (_locker)
            {
                UpdateCounter(_counterOffset + offset);
                _decryptor.TransformBlock(encryptedSpan);
            }

            base.WriteImpl(encryptedSpan, offset);
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(encrypted);
        }
    }

    /// <summary>
    /// Writes (offset >> 4) into the low bytes of the counter, big endian.
    /// </summary>
    private void UpdateCounter(long offset)
    {
        ulong off = (ulong)offset >> 4;
        for (uint j = 0; j < 0x7; j++)
        {
            Counter[0x10 - j - 1] = (byte)(off & 0xFF);
            off >>= 8;
        }

        // Because the value stored in the counter is offset >> 4, the top 4 bits
        // of byte 8 need to have their original value preserved
        Counter[8] = (byte)((Counter[8] & 0xF0) | (int)(off & 0x0F));
    }
}
|
||||
}
|
74
LibHac/IO/Aes128CtrTransform.cs
Normal file
74
LibHac/IO/Aes128CtrTransform.cs
Normal file
@ -0,0 +1,74 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.Buffers.Binary;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Performs AES-128 CTR-mode transforms. CTR mode XORs the data with an
/// encrypted counter stream, so the same transform both encrypts and decrypts.
/// </summary>
public class Aes128CtrTransform
{
    private const int BlockSize = 128;          // bits
    private const int BlockSizeBytes = BlockSize / 8;

    // The current counter. Callers may overwrite this before each
    // TransformBlock call (Aes128CtrStorage does) to make the stream seekable.
    public readonly byte[] Counter = new byte[BlockSizeBytes];

    // AES-ECB encryptor used to generate keystream blocks from counter values.
    private readonly ICryptoTransform _encryptor;

    /// <summary>
    /// Initializes the transform with a 16-byte key and initial counter.
    /// </summary>
    /// <exception cref="ArgumentNullException">A key or counter is null.</exception>
    /// <exception cref="ArgumentException">A key or counter is not 16 bytes long.</exception>
    public Aes128CtrTransform(byte[] key, byte[] counter)
    {
        if (key == null) throw new ArgumentNullException(nameof(key));
        if (counter == null) throw new ArgumentNullException(nameof(counter));
        if (key.Length != BlockSizeBytes)
            throw new ArgumentException($"{nameof(key)} must be {BlockSizeBytes} bytes long");
        if (counter.Length != BlockSizeBytes)
            throw new ArgumentException($"{nameof(counter)} must be {BlockSizeBytes} bytes long");

        Aes aes = Aes.Create();
        if (aes == null) throw new CryptographicException("Unable to create AES object");
        aes.Mode = CipherMode.ECB;
        aes.Padding = PaddingMode.None;

        // CTR is built on ECB: counter blocks are encrypted and XORed with the
        // data, so only an encryptor is ever needed, even for decryption.
        _encryptor = aes.CreateEncryptor(key, new byte[BlockSizeBytes]);

        Array.Copy(counter, Counter, BlockSizeBytes);
    }

    /// <summary>
    /// Transforms <paramref name="data"/> in place using the current counter.
    /// The stored <see cref="Counter"/> is NOT advanced by this call; callers
    /// that stream sequentially must update it themselves.
    /// </summary>
    /// <returns>The number of bytes transformed.</returns>
    public int TransformBlock(Span<byte> data)
    {
        int blockCount = Util.DivideByRoundUp(data.Length, BlockSizeBytes);
        int length = blockCount * BlockSizeBytes;

        byte[] counterXor = ArrayPool<byte>.Shared.Rent(length);
        try
        {
            // Expand the counter into one incrementing 16-byte block per data
            // block, encrypt them all in a single ECB call, then XOR.
            Counter.CopyTo(counterXor, 0);
            FillDecryptedCounter(counterXor.AsSpan(0, length));

            _encryptor.TransformBlock(counterXor, 0, length, counterXor, 0);
            Util.XorArrays(data, counterXor);
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(counterXor);
        }

        return data.Length;
    }

    /// <summary>
    /// Fills the buffer with successive counter blocks, starting from the
    /// counter already present in its first 16 bytes. The low 8 bytes are
    /// treated as a big-endian integer and incremented once per block; the
    /// high 8 bytes are copied unchanged.
    /// NOTE(review): the raw ulong copy of the high half assumes a
    /// little-endian host — confirm if this ever runs on big-endian hardware.
    /// </summary>
    public static void FillDecryptedCounter(Span<byte> buffer)
    {
        Span<ulong> bufL = MemoryMarshal.Cast<byte, ulong>(buffer);

        ulong hi = bufL[0];
        ulong lo = BinaryPrimitives.ReverseEndianness(bufL[1]);

        for (int i = 2; i < bufL.Length; i += 2)
        {
            lo++;
            bufL[i] = hi;
            bufL[i + 1] = BinaryPrimitives.ReverseEndianness(lo);
        }
    }
}
|
||||
}
|
76
LibHac/IO/Aes128XtsStorage.cs
Normal file
76
LibHac/IO/Aes128XtsStorage.cs
Normal file
@ -0,0 +1,76 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class Aes128XtsStorage : SectorStorage
{
    private const int BlockSize = 0x10;

    // Scratch buffer (one sector) used for in-place en/decryption so the
    // caller's spans are never modified.
    private readonly byte[] _tempBuffer;

    // Created lazily on first read/write respectively.
    private Aes128XtsTransform _decryptor;
    private Aes128XtsTransform _encryptor;

    private readonly byte[] _key1;
    private readonly byte[] _key2;

    /// <summary>
    /// Creates an AES-XTS storage from a single 32-byte key (key1 || key2).
    /// </summary>
    /// <exception cref="ArgumentNullException"><paramref name="key"/> is null.</exception>
    /// <exception cref="ArgumentException"><paramref name="key"/> is not 32 bytes long.</exception>
    public Aes128XtsStorage(IStorage baseStorage, Span<byte> key, int sectorSize, bool leaveOpen)
        : base(baseStorage, sectorSize, leaveOpen)
    {
        // Fixed: throw ArgumentNullException instead of NullReferenceException,
        // and pass ArgumentException arguments in (message, paramName) order.
        if (key == null) throw new ArgumentNullException(nameof(key));
        if (key.Length != BlockSize * 2) throw new ArgumentException($"Key must be {BlockSize * 2} bytes long", nameof(key));

        _tempBuffer = new byte[sectorSize];
        _key1 = key.Slice(0, BlockSize).ToArray();
        _key2 = key.Slice(BlockSize, BlockSize).ToArray();

        Length = baseStorage.Length;
    }

    /// <summary>
    /// Creates an AES-XTS storage from two separate 16-byte keys.
    /// </summary>
    /// <exception cref="ArgumentNullException">A key is null.</exception>
    /// <exception cref="ArgumentException">A key is not 16 bytes long.</exception>
    public Aes128XtsStorage(IStorage baseStorage, Span<byte> key1, Span<byte> key2, int sectorSize, bool leaveOpen)
        : base(baseStorage, sectorSize, leaveOpen)
    {
        if (key1 == null) throw new ArgumentNullException(nameof(key1));
        if (key2 == null) throw new ArgumentNullException(nameof(key2));
        // Fixed: the second length check previously re-tested key1 instead of
        // key2, so a wrong-sized key2 was silently accepted.
        if (key1.Length != BlockSize || key2.Length != BlockSize) throw new ArgumentException($"Keys must be {BlockSize} bytes long");

        _tempBuffer = new byte[sectorSize];
        _key1 = key1.ToArray();
        _key2 = key2.ToArray();

        Length = baseStorage.Length;
    }

    public override long Length { get; }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
        int size = destination.Length;
        long sectorIndex = offset / SectorSize;

        if (_decryptor == null) _decryptor = new Aes128XtsTransform(_key1, _key2, true);

        // Read ciphertext into the scratch buffer, decrypt it there, copy out.
        base.ReadImpl(_tempBuffer.AsSpan(0, size), offset);

        _decryptor.TransformBlock(_tempBuffer, 0, size, (ulong)sectorIndex);
        _tempBuffer.AsSpan(0, size).CopyTo(destination);
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
        int size = source.Length;
        long sectorIndex = offset / SectorSize;

        if (_encryptor == null) _encryptor = new Aes128XtsTransform(_key1, _key2, false);

        // Copy plaintext into the scratch buffer, encrypt it there, write out.
        source.CopyTo(_tempBuffer);
        _encryptor.TransformBlock(_tempBuffer, 0, size, (ulong)sectorIndex);

        base.WriteImpl(_tempBuffer.AsSpan(0, size), offset);
    }

    public override void Flush()
    {
        BaseStorage.Flush();
    }
}
|
||||
}
|
238
LibHac/IO/Aes128XtsTransform.cs
Normal file
238
LibHac/IO/Aes128XtsTransform.cs
Normal file
@ -0,0 +1,238 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Performs AES-128 XTS transforms, including ciphertext stealing for
/// lengths that are not a multiple of 16 bytes. Adapted from XtsSharp
/// (Gareth Lennox); see the license header above.
/// </summary>
public class Aes128XtsTransform
{
    private const int BlockSize = 128;          // bits
    private const int BlockSizeBytes = BlockSize / 8;

    // Scratch blocks reused across calls (this type is not thread-safe).
    private readonly byte[] _cc = new byte[16];
    private readonly bool _decrypting;
    // _key1 transforms the data blocks; _key2 encrypts the tweak. In XTS the
    // tweak is always encrypted, even when decrypting data.
    private readonly ICryptoTransform _key1;
    private readonly ICryptoTransform _key2;

    private readonly byte[] _pp = new byte[16];
    private readonly byte[] _t = new byte[16];

    /// <summary>
    /// Initializes the transform with two 16-byte keys.
    /// </summary>
    /// <param name="key1">Key used for the data blocks.</param>
    /// <param name="key2">Key used to encrypt the sector tweak.</param>
    /// <param name="decrypting">True to decrypt, false to encrypt.</param>
    /// <exception cref="ArgumentException">A key is null or the wrong length.</exception>
    public Aes128XtsTransform(byte[] key1, byte[] key2, bool decrypting)
    {
        if (key1?.Length != BlockSizeBytes || key2?.Length != BlockSizeBytes)
            throw new ArgumentException($"Each key must be {BlockSizeBytes} bytes long");

        Aes aes = Aes.Create();
        if (aes == null) throw new CryptographicException("Unable to create AES object");
        aes.Mode = CipherMode.ECB;
        aes.Padding = PaddingMode.None;

        _decrypting = decrypting;

        if (decrypting)
        {
            _key1 = aes.CreateDecryptor(key1, new byte[BlockSizeBytes]);
            _key2 = aes.CreateEncryptor(key2, new byte[BlockSizeBytes]);
        }
        else
        {
            _key1 = aes.CreateEncryptor(key1, new byte[BlockSizeBytes]);
            _key2 = aes.CreateEncryptor(key2, new byte[BlockSizeBytes]);
        }
    }

    /// <summary>
    /// Transforms a single block.
    /// </summary>
    /// <param name="buffer"> The input for which to compute the transform.</param>
    /// <param name="offset">The offset into the byte array from which to begin using data.</param>
    /// <param name="count">The number of bytes in the byte array to use as data.</param>
    /// <param name="sector">The sector number of the block</param>
    /// <returns>The number of bytes written.</returns>
    public int TransformBlock(byte[] buffer, int offset, int count, ulong sector)
    {
        int lim;

        /* get number of blocks */
        int m = count >> 4;
        int mo = count & 15;
        int alignedCount = Util.AlignUp(count, BlockSizeBytes);

        /* for i = 0 to m-2 do */
        if (mo == 0)
            lim = m;
        else
            lim = m - 1;

        // One tweak block per data block, precomputed up front so the full
        // run can be XORed/transformed/XORed in three bulk operations.
        byte[] tweak = ArrayPool<byte>.Shared.Rent(alignedCount);
        try
        {
            FillArrayFromSector(tweak, sector);

            /* encrypt the tweak */
            _key2.TransformBlock(tweak, 0, 16, tweak, 0);

            // Derive the remaining tweaks by successive GF(2^128) doubling.
            FillTweakBuffer(tweak.AsSpan(0, alignedCount));

            if (lim > 0)
            {
                Util.XorArrays(buffer.AsSpan(offset, lim * 16), tweak);
                _key1.TransformBlock(buffer, offset, lim * 16, buffer, offset);
                Util.XorArrays(buffer.AsSpan(offset, lim * 16), tweak);
            }

            // Ciphertext stealing for a trailing partial block.
            // NOTE(review): this path indexes the final full/partial pair at
            // buffer[offset] / buffer[offset + 16], which only lines up with
            // the tweak taken from tweak[lim * 16] when lim <= 1 — confirm
            // callers always pass 16-byte-aligned counts so this is unused.
            if (mo > 0)
            {
                Buffer.BlockCopy(tweak, lim * 16, _t, 0, 16);

                if (_decrypting)
                {
                    Buffer.BlockCopy(tweak, lim * 16 + 16, _cc, 0, 16);

                    /* CC = tweak encrypt block m-1 */
                    TweakCrypt(buffer, offset, _pp, 0, _cc);

                    /* Cm = first ptlen % 16 bytes of CC */
                    int i;
                    for (i = 0; i < mo; i++)
                    {
                        _cc[i] = buffer[16 + i + offset];
                        buffer[16 + i + offset] = _pp[i];
                    }

                    for (; i < 16; i++)
                    {
                        _cc[i] = _pp[i];
                    }

                    /* Cm-1 = Tweak encrypt PP */
                    TweakCrypt(_cc, 0, buffer, offset, _t);
                }
                else
                {
                    /* CC = tweak encrypt block m-1 */
                    TweakCrypt(buffer, offset, _cc, 0, _t);

                    /* Cm = first ptlen % 16 bytes of CC */
                    int i;
                    for (i = 0; i < mo; i++)
                    {
                        _pp[i] = buffer[16 + i + offset];
                        buffer[16 + i + offset] = _cc[i];
                    }

                    for (; i < 16; i++)
                    {
                        _pp[i] = _cc[i];
                    }

                    /* Cm-1 = Tweak encrypt PP */
                    TweakCrypt(_pp, 0, buffer, offset, _t);
                }
            }
        }
        finally { ArrayPool<byte>.Shared.Return(tweak); }

        return count;
    }

    /// <summary>
    /// Expands the encrypted tweak in the first 16 bytes into one tweak per
    /// block by repeated doubling in GF(2^128) with the 0x87 reduction.
    /// NOTE(review): the ulong word layout assumes a little-endian host.
    /// </summary>
    private static void FillTweakBuffer(Span<byte> buffer)
    {
        Span<ulong> bufL = MemoryMarshal.Cast<byte, ulong>(buffer);

        ulong a = bufL[1];
        ulong b = bufL[0];

        for (int i = 2; i < bufL.Length; i += 2)
        {
            // Carry the top bit of the high word into the reduction constant.
            ulong tt = (ulong)((long)a >> 63) & 0x87;

            a = (a << 1) | (b >> 63);
            b = (b << 1) ^ tt;

            bufL[i + 1] = a;
            bufL[i] = b;
        }
    }

    /// <summary>
    /// Fills a byte array from a sector number (little endian)
    /// </summary>
    /// <param name="value">The destination</param>
    /// <param name="sector">The sector number</param>
    private static void FillArrayFromSector(byte[] value, ulong sector)
    {
        for (int i = 0xF; i >= 0; i--)
        {
            value[i] = (byte)sector;
            sector >>= 8;
        }
    }

    /// <summary>
    /// Performs the Xts TweakCrypt operation: XOR with the tweak, transform
    /// with key1, XOR with the tweak again, then advance the tweak.
    /// </summary>
    private void TweakCrypt(byte[] inputBuffer, int inputOffset, byte[] outputBuffer, int outputOffset, byte[] t)
    {
        for (int x = 0; x < 16; x++)
        {
            outputBuffer[x + outputOffset] = (byte)(inputBuffer[x + inputOffset] ^ t[x]);
        }

        _key1.TransformBlock(outputBuffer, outputOffset, 16, outputBuffer, outputOffset);

        for (int x = 0; x < 16; x++)
        {
            outputBuffer[x + outputOffset] = (byte)(outputBuffer[x + outputOffset] ^ t[x]);
        }

        MultiplyByX(t);
    }

    /// <summary>
    /// Multiply by x
    /// </summary>
    /// <param name="i">The value to multiply by x (LFSR shift)</param>
    private static void MultiplyByX(byte[] i)
    {
        byte t = 0, tt = 0;

        // Shift the 128-bit value left by one bit, byte by byte.
        for (int x = 0; x < 16; x++)
        {
            tt = (byte)(i[x] >> 7);
            i[x] = (byte)(((i[x] << 1) | t) & 0xFF);
            t = tt;
        }

        // Reduce modulo the XTS polynomial when a bit fell off the top.
        if (tt > 0)
            i[0] ^= 0x87;
    }
}
|
||||
}
|
128
LibHac/IO/BucketTree.cs
Normal file
128
LibHac/IO/BucketTree.cs
Normal file
@ -0,0 +1,128 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Reads a bucket tree: an offset bucket followed by 0x4000-aligned buckets
/// of <typeparamref name="T"/> entries.
/// </summary>
public class BucketTree<T> where T : BucketTreeEntry<T>, new()
{
    private const int BucketAlignment = 0x4000;
    public BucketTreeHeader Header { get; }
    public BucketTreeBucket<OffsetEntry> BucketOffsets { get; }
    public BucketTreeBucket<T>[] Buckets { get; }

    /// <summary>
    /// Parses the tree from a header storage and a data storage.
    /// </summary>
    public BucketTree(IStorage header, IStorage data)
    {
        Header = new BucketTreeHeader(header);
        var reader = new BinaryReader(data.AsStream());

        // The first bucket lists the starting offsets of the entry buckets.
        BucketOffsets = new BucketTreeBucket<OffsetEntry>(reader);

        Buckets = new BucketTreeBucket<T>[BucketOffsets.EntryCount];

        for (int i = 0; i < BucketOffsets.EntryCount; i++)
        {
            // Entry buckets follow at successive 0x4000-byte boundaries.
            reader.BaseStream.Position = (i + 1) * BucketAlignment;
            Buckets[i] = new BucketTreeBucket<T>(reader);
        }
    }

    /// <summary>
    /// Flattens all buckets into one list, linking each entry to the next and
    /// filling in each entry's end offset.
    /// </summary>
    public List<T> GetEntryList()
    {
        List<T> list = Buckets.SelectMany(x => x.Entries).ToList();

        // Robustness fix: previously indexed list[list.Count - 1] without a
        // guard, throwing on a tree with no entries.
        if (list.Count == 0) return list;

        for (int i = 0; i < list.Count - 1; i++)
        {
            list[i].Next = list[i + 1];
            list[i].OffsetEnd = list[i + 1].Offset;
        }

        list[list.Count - 1].OffsetEnd = BucketOffsets.OffsetEnd;

        return list;
    }
}
|
||||
|
||||
/// <summary>
/// The fixed-size header at the start of a bucket tree.
/// </summary>
public class BucketTreeHeader
{
    // 4-character magic identifying the tree type.
    public string Magic;
    public int Version;
    // Total number of entries across all buckets.
    public int NumEntries;
    // Unknown field at offset 0x1C.
    public int Field1C;

    /// <summary>
    /// Reads the header fields from the start of the given storage.
    /// </summary>
    public BucketTreeHeader(IStorage storage)
    {
        var headerReader = new BinaryReader(storage.AsStream());

        Magic = headerReader.ReadAscii(4);
        Version = headerReader.ReadInt32();
        NumEntries = headerReader.ReadInt32();
        Field1C = headerReader.ReadInt32();
    }
}
|
||||
|
||||
/// <summary>
/// A single bucket of entries within a bucket tree.
/// </summary>
public class BucketTreeBucket<T> where T : BucketTreeEntry<T>, new()
{
    public int Index;
    public int EntryCount;
    // Offset at which the last entry in this bucket ends.
    public long OffsetEnd;
    public T[] Entries;

    /// <summary>
    /// Reads the bucket header and all of its entries from the reader.
    /// </summary>
    public BucketTreeBucket(BinaryReader reader)
    {
        Index = reader.ReadInt32();
        EntryCount = reader.ReadInt32();
        OffsetEnd = reader.ReadInt64();

        var entries = new T[EntryCount];
        for (int i = 0; i < entries.Length; i++)
        {
            entries[i] = new T().Read(reader);
        }

        Entries = entries;
    }
}
|
||||
|
||||
/// <summary>
/// Base class for entries stored in a <see cref="BucketTreeBucket{T}"/>.
/// Every entry begins with a 64-bit start offset; subclasses add their own
/// fields via <see cref="ReadSpecific"/>.
/// </summary>
public abstract class BucketTreeEntry<T> where T : BucketTreeEntry<T>
{
    // Start offset of the range this entry covers.
    public long Offset { get; set; }
    // End of the range; filled in by BucketTree<T>.GetEntryList.
    public long OffsetEnd { get; set; }
    // Next entry in the flattened list; null until linked by GetEntryList.
    public T Next { get; set; }

    // Reads the subclass-specific fields that follow the common offset.
    protected abstract void ReadSpecific(BinaryReader reader);
    internal T Read(BinaryReader reader)
    {
        Offset = reader.ReadInt64();
        ReadSpecific(reader);
        // Return this entry typed as T so callers can construct-and-read inline.
        return (T)this;
    }
}
|
||||
|
||||
/// <summary>
/// A bucket-tree entry consisting only of the common offset field; used for
/// the bucket-offset table at the start of the tree.
/// </summary>
public class OffsetEntry : BucketTreeEntry<OffsetEntry>
{
    protected override void ReadSpecific(BinaryReader reader) { }
}
|
||||
|
||||
/// <summary>
/// A bucket-tree entry describing an AES-CTR subsection: the counter value
/// to use for the range starting at <see cref="BucketTreeEntry{T}.Offset"/>.
/// </summary>
public class AesSubsectionEntry : BucketTreeEntry<AesSubsectionEntry>
{
    // Unknown field at offset 8 of the entry.
    public uint Field8 { get; set; }
    public uint Counter { get; set; }

    protected override void ReadSpecific(BinaryReader reader)
    {
        Field8 = reader.ReadUInt32();
        Counter = reader.ReadUInt32();
    }
}
|
||||
|
||||
/// <summary>
/// A bucket-tree entry mapping a destination range to a source offset in one
/// of several indexed source storages.
/// </summary>
public class RelocationEntry : BucketTreeEntry<RelocationEntry>
{
    public long SourceOffset { get; set; }
    // Index selecting which source storage the data comes from.
    public int SourceIndex { get; set; }

    protected override void ReadSpecific(BinaryReader reader)
    {
        SourceOffset = reader.ReadInt64();
        SourceIndex = reader.ReadInt32();
    }
}
|
||||
}
|
162
LibHac/IO/CachedStorage.cs
Normal file
162
LibHac/IO/CachedStorage.cs
Normal file
@ -0,0 +1,162 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Wraps a storage with a write-back LRU cache of fixed-size blocks.
/// </summary>
public class CachedStorage : Storage
{
    private IStorage BaseStorage { get; }
    private int BlockSize { get; }

    // LRU list (most recently used first) plus an index -> node map for O(1)
    // lookup. All access to both is guarded by locking Blocks.
    private LinkedList<CacheBlock> Blocks { get; } = new LinkedList<CacheBlock>();
    private Dictionary<long, LinkedListNode<CacheBlock>> BlockDict { get; } = new Dictionary<long, LinkedListNode<CacheBlock>>();

    /// <summary>
    /// Creates a cache of <paramref name="cacheSize"/> blocks of
    /// <paramref name="blockSize"/> bytes over <paramref name="baseStorage"/>.
    /// </summary>
    public CachedStorage(IStorage baseStorage, int blockSize, int cacheSize, bool leaveOpen)
    {
        BaseStorage = baseStorage;
        BlockSize = blockSize;
        Length = BaseStorage.Length;

        if (!leaveOpen) ToDispose.Add(BaseStorage);

        for (int i = 0; i < cacheSize; i++)
        {
            // Index = -1 marks a block that has never been loaded. (Fixed: the
            // previous default of 0 meant evicting a never-used block removed
            // the dictionary mapping for real block 0, which could strand a
            // dirty copy of that block in the list and lose updates.)
            var block = new CacheBlock { Index = -1, Buffer = ArrayPool<byte>.Shared.Rent(blockSize) };
            Blocks.AddLast(block);
        }
    }

    public CachedStorage(SectorStorage baseStorage, int cacheSize, bool leaveOpen)
        : this(baseStorage, baseStorage.SectorSize, cacheSize, leaveOpen) { }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
        long remaining = destination.Length;
        long inOffset = offset;
        int outOffset = 0;

        lock (Blocks)
        {
            while (remaining > 0)
            {
                long blockIndex = inOffset / BlockSize;
                int blockPos = (int)(inOffset % BlockSize);
                CacheBlock block = GetBlock(blockIndex);

                int bytesToRead = (int)Math.Min(remaining, BlockSize - blockPos);

                block.Buffer.AsSpan(blockPos, bytesToRead).CopyTo(destination.Slice(outOffset));

                outOffset += bytesToRead;
                inOffset += bytesToRead;
                remaining -= bytesToRead;
            }
        }
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
        long remaining = source.Length;
        long inOffset = offset;
        int outOffset = 0;

        lock (Blocks)
        {
            while (remaining > 0)
            {
                long blockIndex = inOffset / BlockSize;
                int blockPos = (int)(inOffset % BlockSize);
                CacheBlock block = GetBlock(blockIndex);

                int bytesToWrite = (int)Math.Min(remaining, BlockSize - blockPos);

                source.Slice(outOffset, bytesToWrite).CopyTo(block.Buffer.AsSpan(blockPos));

                // Written back lazily on eviction or Flush.
                block.Dirty = true;

                outOffset += bytesToWrite;
                inOffset += bytesToWrite;
                remaining -= bytesToWrite;
            }
        }
    }

    public override void Flush()
    {
        lock (Blocks)
        {
            foreach (CacheBlock cacheItem in Blocks)
            {
                FlushBlock(cacheItem);
            }
        }

        BaseStorage.Flush();
    }

    public override long Length { get; }

    /// <summary>
    /// Returns the cache block for the given block index, loading it (and
    /// evicting the least recently used block) if necessary.
    /// Must be called while holding the Blocks lock.
    /// </summary>
    private CacheBlock GetBlock(long blockIndex)
    {
        if (BlockDict.TryGetValue(blockIndex, out LinkedListNode<CacheBlock> node))
        {
            // Cache hit: move the node to the front (most recently used).
            if (Blocks.First != node)
            {
                Blocks.Remove(node);
                Blocks.AddFirst(node);
            }

            return node.Value;
        }

        // Cache miss: evict the least recently used block.
        node = Blocks.Last;
        CacheBlock block = node.Value;
        Blocks.RemoveLast();
        BlockDict.Remove(block.Index);

        // Fixed: the evicted block was previously flushed twice in a row;
        // a single flush before reuse is sufficient.
        FlushBlock(block);
        ReadBlock(block, blockIndex);

        Blocks.AddFirst(node);
        BlockDict.Add(blockIndex, node);

        return block;
    }

    // Fills the block's buffer from the base storage, clamping the final
    // (possibly short) block to the storage length when it is known.
    private void ReadBlock(CacheBlock block, long index)
    {
        long offset = index * BlockSize;
        int length = BlockSize;

        if (Length != -1)
        {
            length = (int)Math.Min(Length - offset, length);
        }

        BaseStorage.Read(block.Buffer, offset, length, 0);
        block.Length = length;
        block.Index = index;
        block.Dirty = false;
    }

    // Writes the block back to the base storage if it has been modified.
    private void FlushBlock(CacheBlock block)
    {
        if (!block.Dirty) return;

        long offset = block.Index * BlockSize;
        BaseStorage.Write(block.Buffer, offset, block.Length, 0);
        block.Dirty = false;
    }

    private class CacheBlock
    {
        public long Index { get; set; }
        public byte[] Buffer { get; set; }
        public int Length { get; set; }
        public bool Dirty { get; set; }
    }
}
|
||||
}
|
115
LibHac/IO/ConcatenationStorage.cs
Normal file
115
LibHac/IO/ConcatenationStorage.cs
Normal file
@ -0,0 +1,115 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Presents a list of storages as one contiguous storage.
/// </summary>
public class ConcatenationStorage : Storage
{
    private ConcatSource[] Sources { get; }
    public override long Length { get; }

    /// <summary>
    /// Builds the concatenation; every source must report a non-negative length.
    /// </summary>
    public ConcatenationStorage(IList<IStorage> sources, bool leaveOpen)
    {
        Sources = new ConcatSource[sources.Count];
        if (!leaveOpen) ToDispose.AddRange(sources);

        long totalLength = 0;
        for (int i = 0; i < sources.Count; i++)
        {
            IStorage current = sources[i];
            if (current.Length < 0) throw new ArgumentException("Sources must have an explicit length.");

            Sources[i] = new ConcatSource(current, totalLength, current.Length);
            totalLength += current.Length;
        }

        Length = totalLength;
    }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
        long position = offset;
        int destPos = 0;
        int bytesLeft = destination.Length;

        // A request may span several sources; forward each piece in turn.
        while (bytesLeft > 0)
        {
            ConcatSource src = FindSource(position);
            long offsetInSource = position - src.StartOffset;
            int toRead = (int)Math.Min(src.EndOffset - position, bytesLeft);

            src.Storage.Read(destination.Slice(destPos, toRead), offsetInSource);

            destPos += toRead;
            position += toRead;
            bytesLeft -= toRead;
        }
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
        long position = offset;
        int srcPos = 0;
        int bytesLeft = source.Length;

        while (bytesLeft > 0)
        {
            ConcatSource dest = FindSource(position);
            long offsetInSource = position - dest.StartOffset;
            int toWrite = (int)Math.Min(dest.EndOffset - position, bytesLeft);

            dest.Storage.Write(source.Slice(srcPos, toWrite), offsetInSource);

            srcPos += toWrite;
            position += toWrite;
            bytesLeft -= toWrite;
        }
    }

    public override void Flush()
    {
        foreach (ConcatSource source in Sources)
        {
            source.Storage.Flush();
        }
    }

    public override Storage Slice(long start, long length, bool leaveOpen)
    {
        ConcatSource first = FindSource(start);
        ConcatSource last = FindSource(start + length - 1);

        // A slice crossing a source boundary falls back to the generic slice.
        if (first != last)
        {
            return base.Slice(start, length, leaveOpen);
        }

        // Entirely inside one source: slice that source directly.
        Storage slice = first.Storage.Slice(start - first.StartOffset, length, true);
        if (!leaveOpen) slice.ToDispose.Add(this);

        return slice;
    }

    // Linear scan over the (ordered) sources for the one containing offset.
    private ConcatSource FindSource(long offset)
    {
        foreach (ConcatSource candidate in Sources)
        {
            if (candidate.EndOffset > offset) return candidate;
        }

        throw new ArgumentOutOfRangeException(nameof(offset), offset, "The Storage does not contain this offset.");
    }

    // One source storage plus its position within the concatenation.
    private class ConcatSource
    {
        public IStorage Storage { get; }
        public long StartOffset { get; }
        public long EndOffset { get; }

        public ConcatSource(IStorage storage, long startOffset, long length)
        {
            Storage = storage;
            StartOffset = startOffset;
            EndOffset = startOffset + length;
        }
    }
}
|
||||
}
|
237
LibHac/IO/HierarchicalIntegrityVerificationStorage.cs
Normal file
237
LibHac/IO/HierarchicalIntegrityVerificationStorage.cs
Normal file
@ -0,0 +1,237 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class HierarchicalIntegrityVerificationStorage : Storage
|
||||
{
|
||||
// Storages for each level of the hash tree: level 0 is the master hash,
// the last level is the data itself.
public IStorage[] Levels { get; }
// The bottom level, containing the actual data.
public IStorage DataLevel { get; }
public IntegrityCheckLevel IntegrityCheckLevel { get; }

/// <summary>
/// An array of the hash statuses of every block in each level.
/// </summary>
public Validity[][] LevelValidities { get; }
public override long Length { get; }

// The verification wrappers for levels 1..n; used by Validate.
private IntegrityVerificationStorage[] IntegrityStorages { get; }
|
||||
|
||||
/// <summary>
/// Builds the hash-tree storage chain: each level verifies its data against
/// hashes held in the level above it.
/// </summary>
/// <param name="levelInfo">Descriptors for every level, master hash first.</param>
/// <param name="integrityCheckLevel">How strictly hashes are enforced on reads.</param>
/// <param name="leaveOpen">True to leave the level storages open on dispose.</param>
public HierarchicalIntegrityVerificationStorage(IntegrityVerificationInfo[] levelInfo, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
{
    Levels = new IStorage[levelInfo.Length];
    IntegrityCheckLevel = integrityCheckLevel;
    LevelValidities = new Validity[levelInfo.Length - 1][];
    IntegrityStorages = new IntegrityVerificationStorage[levelInfo.Length - 1];

    // Level 0 is the master hash and is used as-is.
    Levels[0] = levelInfo[0].Data;

    for (int i = 1; i < Levels.Length; i++)
    {
        // Each level is verified against the level above it and wrapped in a
        // small block cache.
        var levelData = new IntegrityVerificationStorage(levelInfo[i], Levels[i - 1], integrityCheckLevel, leaveOpen);

        Levels[i] = new CachedStorage(levelData, 4, leaveOpen);
        LevelValidities[i - 1] = levelData.BlockValidities;
        IntegrityStorages[i - 1] = levelData;
    }

    // The last level holds the actual data; disposing it cascades down.
    DataLevel = Levels[Levels.Length - 1];
    Length = DataLevel.Length;

    if (!leaveOpen) ToDispose.Add(DataLevel);
}
|
||||
|
||||
/// <summary>
/// Creates the storage from an IVFC header, a master-hash storage, and the
/// combined data storage that the header's level headers describe.
/// </summary>
public HierarchicalIntegrityVerificationStorage(IvfcHeader header, IStorage masterHash, IStorage data,
    IntegrityStorageType type, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
    : this(header, ToStorageList(header, masterHash, data, leaveOpen), type, integrityCheckLevel, leaveOpen) { }
|
||||
|
||||
/// <summary>
/// Creates the storage from an IVFC header and pre-split level storages,
/// master hash first.
/// </summary>
public HierarchicalIntegrityVerificationStorage(IvfcHeader header, IList<IStorage> levels,
    IntegrityStorageType type, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
    : this(GetIvfcInfo(header, levels, type), integrityCheckLevel, leaveOpen) { }
|
||||
|
||||
/// <summary>
/// Splits the combined data storage into one slice per IVFC level, prefixed
/// by the master-hash storage.
/// </summary>
private static List<IStorage> ToStorageList(IvfcHeader header, IStorage masterHash, IStorage data, bool leaveOpen)
{
    var storages = new List<IStorage> { masterHash };

    int levelCount = header.NumLevels - 1;
    for (int levelIndex = 0; levelIndex < levelCount; levelIndex++)
    {
        IvfcLevelHeader levelHeader = header.LevelHeaders[levelIndex];
        storages.Add(data.Slice(levelHeader.Offset, levelHeader.Size, leaveOpen));
    }

    return storages;
}
|
||||
|
||||
/// <summary>
/// Builds the per-level verification descriptors from an IVFC header and the
/// corresponding level storages. Level 0 (the master hash) gets no block
/// size or salt; every other level's salt is an HMAC of the header's salt
/// source keyed by a fixed per-level string.
/// </summary>
private static IntegrityVerificationInfo[] GetIvfcInfo(IvfcHeader ivfc, IList<IStorage> levels, IntegrityStorageType type)
{
    var initInfo = new IntegrityVerificationInfo[ivfc.NumLevels];

    initInfo[0] = new IntegrityVerificationInfo
    {
        Data = levels[0],
        BlockSize = 0
    };

    for (int i = 1; i < ivfc.NumLevels; i++)
    {
        initInfo[i] = new IntegrityVerificationInfo
        {
            Data = levels[i],
            // Block size is stored as a power of two in the level header.
            BlockSize = 1 << ivfc.LevelHeaders[i - 1].BlockSizePower,
            Salt = new HMACSHA256(Encoding.ASCII.GetBytes(SaltSources[i - 1])).ComputeHash(ivfc.SaltSource),
            Type = type
        };
    }

    return initInfo;
}
|
||||
|
||||
// Reads are delegated to the data level; hash checks happen inside the
// IntegrityVerificationStorage wrappers as data flows up the chain.
protected override void ReadImpl(Span<byte> destination, long offset)
{
    DataLevel.Read(destination, offset);
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
DataLevel.Write(source, offset);
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
DataLevel.Flush();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Checks the hashes of any unchecked blocks and returns the <see cref="Validity"/> of the data.
|
||||
/// </summary>
|
||||
/// <param name="returnOnError">If <see langword="true"/>, return as soon as an invalid block is found.</param>
|
||||
/// <param name="logger">An optional <see cref="IProgressReport"/> for reporting progress.</param>
|
||||
/// <returns>The <see cref="Validity"/> of the data of the specified hash level.</returns>
|
||||
public Validity Validate(bool returnOnError, IProgressReport logger = null)
|
||||
{
|
||||
Validity[] validities = LevelValidities[LevelValidities.Length - 1];
|
||||
IntegrityVerificationStorage storage = IntegrityStorages[IntegrityStorages.Length - 1];
|
||||
|
||||
long blockSize = storage.SectorSize;
|
||||
int blockCount = (int)Util.DivideByRoundUp(Length, blockSize);
|
||||
|
||||
var buffer = new byte[blockSize];
|
||||
var result = Validity.Valid;
|
||||
|
||||
logger?.SetTotal(blockCount);
|
||||
|
||||
for (int i = 0; i < blockCount; i++)
|
||||
{
|
||||
if (validities[i] == Validity.Unchecked)
|
||||
{
|
||||
int toRead = (int)Math.Min(storage.Length - blockSize * i, buffer.Length);
|
||||
storage.Read(buffer, blockSize * i, toRead, 0, IntegrityCheckLevel.IgnoreOnInvalid);
|
||||
}
|
||||
|
||||
if (validities[i] == Validity.Invalid)
|
||||
{
|
||||
result = Validity.Invalid;
|
||||
if (returnOnError) break;
|
||||
}
|
||||
|
||||
logger?.ReportAdd(1);
|
||||
}
|
||||
|
||||
logger?.SetTotal(0);
|
||||
return result;
|
||||
}
|
||||
|
||||
private static readonly string[] SaltSources =
|
||||
{
|
||||
"HierarchicalIntegrityVerificationStorage::Master",
|
||||
"HierarchicalIntegrityVerificationStorage::L1",
|
||||
"HierarchicalIntegrityVerificationStorage::L2",
|
||||
"HierarchicalIntegrityVerificationStorage::L3",
|
||||
"HierarchicalIntegrityVerificationStorage::L4",
|
||||
"HierarchicalIntegrityVerificationStorage::L5"
|
||||
};
|
||||
}
|
||||
|
||||
public static class HierarchicalIntegrityVerificationStorageExtensions
|
||||
{
|
||||
internal static void SetLevelValidities(this HierarchicalIntegrityVerificationStorage stream, IvfcHeader header)
|
||||
{
|
||||
for (int i = 0; i < stream.Levels.Length - 1; i++)
|
||||
{
|
||||
Validity[] level = stream.LevelValidities[i];
|
||||
var levelValidity = Validity.Valid;
|
||||
|
||||
foreach (Validity block in level)
|
||||
{
|
||||
if (block == Validity.Invalid)
|
||||
{
|
||||
levelValidity = Validity.Invalid;
|
||||
break;
|
||||
}
|
||||
|
||||
if (block == Validity.Unchecked && levelValidity != Validity.Invalid)
|
||||
{
|
||||
levelValidity = Validity.Unchecked;
|
||||
}
|
||||
}
|
||||
|
||||
header.LevelHeaders[i].HashValidity = levelValidity;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class IvfcHeader
|
||||
{
|
||||
public string Magic;
|
||||
public int Version;
|
||||
public int MasterHashSize;
|
||||
public int NumLevels;
|
||||
public IvfcLevelHeader[] LevelHeaders = new IvfcLevelHeader[6];
|
||||
public byte[] SaltSource;
|
||||
public byte[] MasterHash;
|
||||
|
||||
public IvfcHeader() { }
|
||||
|
||||
public IvfcHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
reader.BaseStream.Position += 2;
|
||||
Version = reader.ReadInt16();
|
||||
MasterHashSize = reader.ReadInt32();
|
||||
NumLevels = reader.ReadInt32();
|
||||
|
||||
for (int i = 0; i < LevelHeaders.Length; i++)
|
||||
{
|
||||
LevelHeaders[i] = new IvfcLevelHeader(reader);
|
||||
}
|
||||
|
||||
SaltSource = reader.ReadBytes(0x20);
|
||||
MasterHash = reader.ReadBytes(0x20);
|
||||
}
|
||||
|
||||
public IvfcHeader(IStorage storage) : this(new BinaryReader(storage.AsStream())) { }
|
||||
}
|
||||
|
||||
public class IvfcLevelHeader
|
||||
{
|
||||
public long Offset;
|
||||
public long Size;
|
||||
public int BlockSizePower;
|
||||
public uint Reserved;
|
||||
|
||||
public Validity HashValidity = Validity.Unchecked;
|
||||
|
||||
public IvfcLevelHeader() { }
|
||||
|
||||
public IvfcLevelHeader(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
Size = reader.ReadInt64();
|
||||
BlockSizePower = reader.ReadInt32();
|
||||
Reserved = reader.ReadUInt32();
|
||||
}
|
||||
}
|
||||
}
|
53
LibHac/IO/IStorage.cs
Normal file
53
LibHac/IO/IStorage.cs
Normal file
@ -0,0 +1,53 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public interface IStorage : IDisposable
|
||||
{
|
||||
/// <summary>
|
||||
/// Reads a sequence of bytes from the current <see cref="IStorage"/>.
|
||||
/// </summary>
|
||||
/// <param name="destination">The buffer where the read bytes will be stored.
|
||||
/// The number of bytes read will be equal to the length of the buffer.</param>
|
||||
/// <param name="offset">The offset in the <see cref="IStorage"/> to begin reading from.</param>
|
||||
void Read(Span<byte> destination, long offset);
|
||||
|
||||
/// <summary>
|
||||
/// Reads a sequence of bytes from the current <see cref="IStorage"/>.
|
||||
/// </summary>
|
||||
/// <param name="buffer">The buffer where the read bytes will be stored.</param>
|
||||
/// <param name="offset">The zero-based byte offset in <paramref name="buffer"/>
|
||||
/// at which to begin storing the data read from the current <see cref="IStorage"/>.</param>
|
||||
/// <param name="count">The number of bytes to be read from the <see cref="IStorage"/>.</param>
|
||||
/// <param name="bufferOffset">The offset in the <see cref="IStorage"/> to begin reading from.</param>
|
||||
void Read(byte[] buffer, long offset, int count, int bufferOffset);
|
||||
|
||||
/// <summary>
|
||||
/// Writes a sequence of bytes to the current <see cref="IStorage"/>.
|
||||
/// </summary>
|
||||
/// <param name="source">The buffer containing the bytes to be written.</param>
|
||||
/// <param name="offset">The offset in the <see cref="IStorage"/> to begin writing to.</param>
|
||||
void Write(ReadOnlySpan<byte> source, long offset);
|
||||
|
||||
/// <summary>
|
||||
/// Writes a sequence of bytes to the current <see cref="IStorage"/>.
|
||||
/// </summary>
|
||||
/// <param name="buffer"></param>
|
||||
/// <param name="offset">The zero-based byte offset in <paramref name="buffer"/>
|
||||
/// at which to begin begin copying bytes to the current <see cref="IStorage"/>.</param>
|
||||
/// <param name="count">The number of bytes to be written to the <see cref="IStorage"/>.</param>
|
||||
/// <param name="bufferOffset">The offset in the <see cref="IStorage"/> to begin writing to.</param>
|
||||
void Write(byte[] buffer, long offset, int count, int bufferOffset);
|
||||
|
||||
/// <summary>
|
||||
/// Causes any buffered data to be written to the underlying device.
|
||||
/// </summary>
|
||||
void Flush();
|
||||
|
||||
/// <summary>
|
||||
/// The length of the <see cref="IStorage"/>. -1 will be returned if
|
||||
/// the <see cref="IStorage"/> cannot be represented as a sequence of contiguous bytes.
|
||||
/// </summary>
|
||||
long Length { get; }
|
||||
}
|
||||
}
|
76
LibHac/IO/IndirectStorage.cs
Normal file
76
LibHac/IO/IndirectStorage.cs
Normal file
@ -0,0 +1,76 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class IndirectStorage : Storage
|
||||
{
|
||||
private List<RelocationEntry> RelocationEntries { get; }
|
||||
private List<long> RelocationOffsets { get; }
|
||||
|
||||
private List<IStorage> Sources { get; } = new List<IStorage>();
|
||||
private BucketTree<RelocationEntry> BucketTree { get; }
|
||||
|
||||
public IndirectStorage(IStorage bucketTreeHeader, IStorage bucketTreeData, bool leaveOpen, params IStorage[] sources)
|
||||
{
|
||||
Sources.AddRange(sources);
|
||||
|
||||
if (!leaveOpen) ToDispose.AddRange(sources);
|
||||
|
||||
BucketTree = new BucketTree<RelocationEntry>(bucketTreeHeader, bucketTreeData);
|
||||
|
||||
RelocationEntries = BucketTree.GetEntryList();
|
||||
RelocationOffsets = RelocationEntries.Select(x => x.Offset).ToList();
|
||||
|
||||
Length = BucketTree.BucketOffsets.OffsetEnd;
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
RelocationEntry entry = GetRelocationEntry(offset);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
long entryPos = inPos - entry.Offset;
|
||||
|
||||
int bytesToRead = (int)Math.Min(entry.OffsetEnd - inPos, remaining);
|
||||
Sources[entry.SourceIndex].Read(destination.Slice(outPos, bytesToRead), entry.SourceOffset + entryPos);
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
|
||||
if (inPos >= entry.OffsetEnd)
|
||||
{
|
||||
entry = entry.Next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override bool CanWrite => false;
|
||||
|
||||
public override long Length { get; }
|
||||
|
||||
private RelocationEntry GetRelocationEntry(long offset)
|
||||
{
|
||||
int index = RelocationOffsets.BinarySearch(offset);
|
||||
if (index < 0) index = ~index - 1;
|
||||
return RelocationEntries[index];
|
||||
}
|
||||
}
|
||||
}
|
213
LibHac/IO/IntegrityVerificationStorage.cs
Normal file
213
LibHac/IO/IntegrityVerificationStorage.cs
Normal file
@ -0,0 +1,213 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class IntegrityVerificationStorage : SectorStorage
|
||||
{
|
||||
private const int DigestSize = 0x20;
|
||||
|
||||
private IStorage HashStorage { get; }
|
||||
public IntegrityCheckLevel IntegrityCheckLevel { get; }
|
||||
public Validity[] BlockValidities { get; }
|
||||
|
||||
private byte[] Salt { get; }
|
||||
private IntegrityStorageType Type { get; }
|
||||
|
||||
private readonly SHA256 _hash = SHA256.Create();
|
||||
private readonly object _locker = new object();
|
||||
|
||||
public IntegrityVerificationStorage(IntegrityVerificationInfo info, IStorage hashStorage,
|
||||
IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
|
||||
: base(info.Data, info.BlockSize, leaveOpen)
|
||||
{
|
||||
HashStorage = hashStorage;
|
||||
IntegrityCheckLevel = integrityCheckLevel;
|
||||
Salt = info.Salt;
|
||||
Type = info.Type;
|
||||
|
||||
BlockValidities = new Validity[SectorCount];
|
||||
}
|
||||
|
||||
private void ReadImpl(Span<byte> destination, long offset, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
int count = destination.Length;
|
||||
|
||||
if (count < 0 || count > SectorSize)
|
||||
throw new ArgumentOutOfRangeException(nameof(destination), "Length is invalid.");
|
||||
|
||||
Span<byte> hashBuffer = stackalloc byte[DigestSize];
|
||||
long blockIndex = offset / SectorSize;
|
||||
long hashPos = blockIndex * DigestSize;
|
||||
|
||||
if (BlockValidities[blockIndex] == Validity.Invalid && integrityCheckLevel == IntegrityCheckLevel.ErrorOnInvalid)
|
||||
{
|
||||
throw new InvalidDataException("Hash error!");
|
||||
}
|
||||
|
||||
HashStorage.Read(hashBuffer, hashPos);
|
||||
|
||||
if (Type == IntegrityStorageType.Save && Util.IsEmpty(hashBuffer))
|
||||
{
|
||||
destination.Clear();
|
||||
BlockValidities[blockIndex] = Validity.Valid;
|
||||
return;
|
||||
}
|
||||
|
||||
byte[] dataBuffer = ArrayPool<byte>.Shared.Rent(SectorSize);
|
||||
try
|
||||
{
|
||||
BaseStorage.Read(dataBuffer, offset, count, 0);
|
||||
dataBuffer.AsSpan(0, count).CopyTo(destination);
|
||||
|
||||
if (integrityCheckLevel == IntegrityCheckLevel.None) return;
|
||||
if (BlockValidities[blockIndex] != Validity.Unchecked) return;
|
||||
|
||||
int bytesToHash = SectorSize;
|
||||
|
||||
if (count < SectorSize)
|
||||
{
|
||||
// Pad out unused portion of block
|
||||
Array.Clear(dataBuffer, count, SectorSize - count);
|
||||
|
||||
// Partition FS hashes don't pad out an incomplete block
|
||||
if (Type == IntegrityStorageType.PartitionFs)
|
||||
{
|
||||
bytesToHash = count;
|
||||
}
|
||||
}
|
||||
|
||||
byte[] hash = DoHash(dataBuffer, 0, bytesToHash);
|
||||
|
||||
Validity validity = Util.SpansEqual(hashBuffer, hash) ? Validity.Valid : Validity.Invalid;
|
||||
BlockValidities[blockIndex] = validity;
|
||||
|
||||
if (validity == Validity.Invalid && integrityCheckLevel == IntegrityCheckLevel.ErrorOnInvalid)
|
||||
{
|
||||
throw new InvalidDataException("Hash error!");
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(dataBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
ReadImpl(destination, offset, IntegrityCheckLevel);
|
||||
}
|
||||
|
||||
public void Read(Span<byte> destination, long offset, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
ValidateSpanParameters(destination, offset);
|
||||
ReadImpl(destination, offset, integrityCheckLevel);
|
||||
}
|
||||
|
||||
public void Read(byte[] buffer, long offset, int count, int bufferOffset, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
ValidateArrayParameters(buffer, offset, count, bufferOffset);
|
||||
ReadImpl(buffer.AsSpan(bufferOffset, count), offset, integrityCheckLevel);
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
long blockIndex = offset / SectorSize;
|
||||
long hashPos = blockIndex * DigestSize;
|
||||
|
||||
int toWrite = (int)Math.Min(source.Length, Length - offset);
|
||||
|
||||
byte[] dataBuffer = ArrayPool<byte>.Shared.Rent(SectorSize);
|
||||
try
|
||||
{
|
||||
source.CopyTo(dataBuffer);
|
||||
byte[] hash = DoHash(dataBuffer, 0, toWrite);
|
||||
|
||||
if (Type == IntegrityStorageType.Save && source.IsEmpty())
|
||||
{
|
||||
Array.Clear(hash, 0, DigestSize);
|
||||
}
|
||||
|
||||
BaseStorage.Write(source, offset);
|
||||
|
||||
HashStorage.Write(hash, hashPos);
|
||||
BlockValidities[blockIndex] = Validity.Unchecked;
|
||||
}
|
||||
finally
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(dataBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
private byte[] DoHash(byte[] buffer, int offset, int count)
|
||||
{
|
||||
lock (_locker)
|
||||
{
|
||||
_hash.Initialize();
|
||||
|
||||
if (Type == IntegrityStorageType.Save)
|
||||
{
|
||||
_hash.TransformBlock(Salt, 0, Salt.Length, null, 0);
|
||||
}
|
||||
|
||||
_hash.TransformBlock(buffer, offset, count, null, 0);
|
||||
_hash.TransformFinalBlock(buffer, 0, 0);
|
||||
|
||||
byte[] hash = _hash.Hash;
|
||||
|
||||
if (Type == IntegrityStorageType.Save)
|
||||
{
|
||||
// This bit is set on all save hashes
|
||||
hash[0x1F] |= 0x80;
|
||||
}
|
||||
|
||||
return hash;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
HashStorage.Flush();
|
||||
base.Flush();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Information for creating an <see cref="IntegrityVerificationStorage"/>
|
||||
/// </summary>
|
||||
public class IntegrityVerificationInfo
|
||||
{
|
||||
public IStorage Data { get; set; }
|
||||
public int BlockSize { get; set; }
|
||||
public byte[] Salt { get; set; }
|
||||
public IntegrityStorageType Type { get; set; }
|
||||
}
|
||||
|
||||
public enum IntegrityStorageType
|
||||
{
|
||||
Save,
|
||||
RomFs,
|
||||
PartitionFs
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Represents the level of integrity checks to be performed.
|
||||
/// </summary>
|
||||
public enum IntegrityCheckLevel
|
||||
{
|
||||
/// <summary>
|
||||
/// No integrity checks will be performed.
|
||||
/// </summary>
|
||||
None,
|
||||
/// <summary>
|
||||
/// Invalid blocks will be marked as invalid when read, and will not cause an error.
|
||||
/// </summary>
|
||||
IgnoreOnInvalid,
|
||||
/// <summary>
|
||||
/// An <see cref="InvalidDataException"/> will be thrown if an integrity check fails.
|
||||
/// </summary>
|
||||
ErrorOnInvalid
|
||||
}
|
||||
}
|
41
LibHac/IO/MemoryStorage.cs
Normal file
41
LibHac/IO/MemoryStorage.cs
Normal file
@ -0,0 +1,41 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
public class MemoryStorage : Storage
|
||||
{
|
||||
private byte[] Buffer { get; }
|
||||
private int Start { get; }
|
||||
|
||||
public MemoryStorage(byte[] buffer) : this(buffer, 0, buffer.Length)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
public MemoryStorage(byte[] buffer, int index, int count)
|
||||
{
|
||||
if (buffer == null) throw new NullReferenceException(nameof(buffer));
|
||||
if (index < 0) throw new ArgumentOutOfRangeException(nameof(index), "Value must be non-negative.");
|
||||
if (count < 0) throw new ArgumentOutOfRangeException(nameof(count), "Value must be non-negative.");
|
||||
if (buffer.Length - index < count) throw new ArgumentException("Length, index and count parameters are invalid.");
|
||||
|
||||
Buffer = buffer;
|
||||
Start = index;
|
||||
Length = count;
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
Buffer.AsSpan((int)(Start + offset), destination.Length).CopyTo(destination);
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
source.CopyTo(Buffer.AsSpan((int)(Start + offset), source.Length));
|
||||
}
|
||||
|
||||
public override void Flush() { }
|
||||
|
||||
public override long Length { get; }
|
||||
}
|
||||
}
|
27
LibHac/IO/NullStorage.cs
Normal file
27
LibHac/IO/NullStorage.cs
Normal file
@ -0,0 +1,27 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
|
||||
/// An <see cref="IStorage"/> that returns all zeros when read, and does nothing on write.
|
||||
/// </summary>
|
||||
public class NullStorage : Storage
|
||||
{
|
||||
public NullStorage() { }
|
||||
public NullStorage(long length) => Length = length;
|
||||
|
||||
public override long Length { get; }
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
destination.Clear();
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
93
LibHac/IO/Save/AllocationTable.cs
Normal file
93
LibHac/IO/Save/AllocationTable.cs
Normal file
@ -0,0 +1,93 @@
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class AllocationTable
|
||||
{
|
||||
private IStorage BaseStorage { get; }
|
||||
private IStorage HeaderStorage { get; }
|
||||
|
||||
public AllocationTableEntry[] Entries { get; }
|
||||
public AllocationTableHeader Header { get; }
|
||||
|
||||
public AllocationTable(IStorage storage, IStorage header)
|
||||
{
|
||||
BaseStorage = storage;
|
||||
HeaderStorage = header;
|
||||
Header = new AllocationTableHeader(HeaderStorage);
|
||||
|
||||
Stream tableStream = storage.AsStream();
|
||||
int blockCount = (int)(Header.AllocationTableBlockCount);
|
||||
|
||||
Entries = new AllocationTableEntry[blockCount];
|
||||
tableStream.Position = 0;
|
||||
var reader = new BinaryReader(tableStream);
|
||||
|
||||
for (int i = 0; i < blockCount; i++)
|
||||
{
|
||||
int parent = reader.ReadInt32();
|
||||
int child = reader.ReadInt32();
|
||||
|
||||
Entries[i] = new AllocationTableEntry { Next = child, Prev = parent };
|
||||
}
|
||||
}
|
||||
|
||||
public IStorage GetBaseStorage() => BaseStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetHeaderStorage() => HeaderStorage.WithAccess(FileAccess.Read);
|
||||
}
|
||||
|
||||
public class AllocationTableEntry
|
||||
{
|
||||
public int Prev { get; set; }
|
||||
public int Next { get; set; }
|
||||
|
||||
public bool IsListStart()
|
||||
{
|
||||
return Prev == int.MinValue;
|
||||
}
|
||||
|
||||
public bool IsListEnd()
|
||||
{
|
||||
return (Next & 0x7FFFFFFF) == 0;
|
||||
}
|
||||
|
||||
public bool IsMultiBlockSegment()
|
||||
{
|
||||
return Next < 0;
|
||||
}
|
||||
|
||||
public bool IsSingleBlockSegment()
|
||||
{
|
||||
return Next >= 0;
|
||||
}
|
||||
}
|
||||
|
||||
public class AllocationTableHeader
|
||||
{
|
||||
public long BlockSize { get; }
|
||||
public long AllocationTableOffset { get; }
|
||||
public long AllocationTableBlockCount { get; }
|
||||
public long DataOffset { get; }
|
||||
public long DataBlockCount { get; }
|
||||
public int DirectoryTableBlock { get; }
|
||||
public int FileTableBlock { get; }
|
||||
|
||||
public AllocationTableHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
BlockSize = reader.ReadInt64();
|
||||
|
||||
AllocationTableOffset = reader.ReadInt64();
|
||||
AllocationTableBlockCount = reader.ReadInt32();
|
||||
reader.BaseStream.Position += 4;
|
||||
|
||||
DataOffset = reader.ReadInt64();
|
||||
DataBlockCount = reader.ReadInt32();
|
||||
reader.BaseStream.Position += 4;
|
||||
|
||||
DirectoryTableBlock = reader.ReadInt32();
|
||||
FileTableBlock = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.Save
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class AllocationTableIterator
|
||||
{
|
||||
@ -13,6 +13,7 @@ namespace LibHac.Save
|
||||
public AllocationTableIterator(AllocationTable table, int initialBlock)
|
||||
{
|
||||
Fat = table;
|
||||
|
||||
if (!BeginIteration(initialBlock))
|
||||
{
|
||||
throw new ArgumentException($"Attempted to start FAT iteration from an invalid block. ({initialBlock}");
|
||||
@ -88,5 +89,24 @@ namespace LibHac.Save
|
||||
PhysicalBlock = newBlock - 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
public bool Seek(int block)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
if (block < VirtualBlock)
|
||||
{
|
||||
if (!MovePrevious()) return false;
|
||||
}
|
||||
else if (block >= VirtualBlock + CurrentSegmentSize)
|
||||
{
|
||||
if (!MoveNext()) return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
82
LibHac/IO/Save/AllocationTableStorage.cs
Normal file
82
LibHac/IO/Save/AllocationTableStorage.cs
Normal file
@ -0,0 +1,82 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class AllocationTableStorage : Storage
|
||||
{
|
||||
private IStorage BaseStorage { get; }
|
||||
private int BlockSize { get; }
|
||||
private int InitialBlock { get; }
|
||||
private AllocationTable Fat { get; }
|
||||
|
||||
public override long Length { get; }
|
||||
|
||||
public AllocationTableStorage(IStorage data, AllocationTable table, int blockSize, int initialBlock, long length)
|
||||
{
|
||||
BaseStorage = data;
|
||||
BlockSize = blockSize;
|
||||
Length = length;
|
||||
Fat = table;
|
||||
InitialBlock = initialBlock;
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
var iterator = new AllocationTableIterator(Fat, InitialBlock);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
iterator.Seek(blockNum);
|
||||
|
||||
int segmentPos = (int)(inPos - (long)iterator.VirtualBlock * BlockSize);
|
||||
long physicalOffset = iterator.PhysicalBlock * BlockSize + segmentPos;
|
||||
|
||||
int remainingInSegment = iterator.CurrentSegmentSize * BlockSize - segmentPos;
|
||||
int bytesToRead = Math.Min(remaining, remainingInSegment);
|
||||
|
||||
BaseStorage.Read(destination.Slice(outPos, bytesToRead), physicalOffset);
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
var iterator = new AllocationTableIterator(Fat, InitialBlock);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = source.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
iterator.Seek(blockNum);
|
||||
|
||||
int segmentPos = (int)(inPos - (long)iterator.VirtualBlock * BlockSize);
|
||||
long physicalOffset = iterator.PhysicalBlock * BlockSize + segmentPos;
|
||||
|
||||
int remainingInSegment = iterator.CurrentSegmentSize * BlockSize - segmentPos;
|
||||
int bytesToWrite = Math.Min(remaining, remainingInSegment);
|
||||
|
||||
BaseStorage.Write(source.Slice(outPos, bytesToWrite), physicalOffset);
|
||||
|
||||
outPos += bytesToWrite;
|
||||
inPos += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BaseStorage.Flush();
|
||||
}
|
||||
}
|
||||
}
|
@ -2,16 +2,16 @@
|
||||
using System.Collections;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class DuplexBitmap
|
||||
{
|
||||
private Stream Data { get; }
|
||||
private IStorage Data { get; }
|
||||
public BitArray Bitmap { get; }
|
||||
|
||||
public DuplexBitmap(Stream bitmapStream, int lengthBits)
|
||||
public DuplexBitmap(IStorage bitmapStorage, int lengthBits)
|
||||
{
|
||||
Data = bitmapStream;
|
||||
Data = bitmapStorage;
|
||||
Bitmap = new BitArray(lengthBits);
|
||||
ReadBitmap(lengthBits);
|
||||
}
|
||||
@ -19,7 +19,7 @@ namespace LibHac.Save
|
||||
private void ReadBitmap(int lengthBits)
|
||||
{
|
||||
uint mask = unchecked((uint)(1 << 31));
|
||||
var reader = new BinaryReader(Data);
|
||||
var reader = new BinaryReader(Data.AsStream());
|
||||
int bitsRemaining = lengthBits;
|
||||
int bitmapPos = 0;
|
||||
|
79
LibHac/IO/Save/DuplexStorage.cs
Normal file
79
LibHac/IO/Save/DuplexStorage.cs
Normal file
@ -0,0 +1,79 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class DuplexStorage : Storage
|
||||
{
|
||||
private int BlockSize { get; }
|
||||
private IStorage BitmapStorage { get; }
|
||||
private IStorage DataA { get; }
|
||||
private IStorage DataB { get; }
|
||||
private DuplexBitmap Bitmap { get; }
|
||||
|
||||
public DuplexStorage(IStorage dataA, IStorage dataB, IStorage bitmap, int blockSize)
|
||||
{
|
||||
DataA = dataA;
|
||||
DataB = dataB;
|
||||
BitmapStorage = bitmap;
|
||||
BlockSize = blockSize;
|
||||
|
||||
Bitmap = new DuplexBitmap(BitmapStorage, (int)(bitmap.Length * 8));
|
||||
Length = DataA.Length;
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
int blockPos = (int)(inPos % BlockSize);
|
||||
|
||||
int bytesToRead = Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
IStorage data = Bitmap.Bitmap[blockNum] ? DataB : DataA;
|
||||
|
||||
data.Read(destination.Slice(outPos, bytesToRead), inPos);
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = source.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
int blockPos = (int)(inPos % BlockSize);
|
||||
|
||||
int bytesToWrite = Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
IStorage data = Bitmap.Bitmap[blockNum] ? DataB : DataA;
|
||||
|
||||
data.Write(source.Slice(outPos, bytesToWrite), inPos);
|
||||
|
||||
outPos += bytesToWrite;
|
||||
inPos += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BitmapStorage?.Flush();
|
||||
DataA?.Flush();
|
||||
DataB?.Flush();
|
||||
}
|
||||
|
||||
public override long Length { get; }
|
||||
}
|
||||
}
|
@ -3,7 +3,7 @@ using System.Diagnostics;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
|
||||
namespace LibHac.Save
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
[DebuggerDisplay("{" + nameof(FullPath) + "}")]
|
||||
public abstract class FsEntry
|
292
LibHac/IO/Save/Header.cs
Normal file
292
LibHac/IO/Save/Header.cs
Normal file
@ -0,0 +1,292 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class Header
|
||||
{
|
||||
public IStorage MainStorage { get; }
|
||||
public IStorage MainHeader { get; }
|
||||
public IStorage DuplexHeader { get; }
|
||||
public IStorage DataIvfcHeader { get; }
|
||||
public IStorage JournalHeader { get; }
|
||||
public IStorage SaveHeader { get; }
|
||||
public IStorage MainRemapHeader { get; }
|
||||
public IStorage MetaDataRemapHeader { get; }
|
||||
public IStorage ExtraDataStorage { get; }
|
||||
public IStorage FatIvfcHeader { get; }
|
||||
public IStorage DuplexMasterBitmapA { get; }
|
||||
public IStorage DuplexMasterBitmapB { get; }
|
||||
public IStorage DataIvfcMaster { get; }
|
||||
public IStorage FatIvfcMaster { get; }
|
||||
|
||||
public byte[] Cmac { get; set; }
|
||||
public FsLayout Layout { get; set; }
|
||||
public DuplexHeader Duplex { get; set; }
|
||||
public IvfcHeader Ivfc { get; set; }
|
||||
public IvfcHeader FatIvfc { get; set; }
|
||||
|
||||
public ExtraData ExtraData { get; set; }
|
||||
|
||||
public IStorage MasterHash { get; }
|
||||
|
||||
public Validity SignatureValidity { get; }
|
||||
public Validity HeaderHashValidity { get; }
|
||||
|
||||
public byte[] Data { get; }
|
||||
|
||||
public Header(Keyset keyset, IStorage storage)
|
||||
{
|
||||
MainStorage = storage;
|
||||
MainHeader = MainStorage.Slice(0x100, 0x200);
|
||||
DuplexHeader = MainStorage.Slice(0x300, 0x44);
|
||||
DataIvfcHeader = MainStorage.Slice(0x344, 0xC0);
|
||||
JournalHeader = MainStorage.Slice(0x408, 0x200);
|
||||
SaveHeader = MainStorage.Slice(0x608, 0x48);
|
||||
MainRemapHeader = MainStorage.Slice(0x650, 0x40);
|
||||
MetaDataRemapHeader = MainStorage.Slice(0x690, 0x40);
|
||||
ExtraDataStorage = MainStorage.Slice(0x6D8, 0x400);
|
||||
FatIvfcHeader = MainStorage.Slice(0xAD8, 0xC0);
|
||||
|
||||
Layout = new FsLayout(MainHeader);
|
||||
|
||||
DuplexMasterBitmapA = MainStorage.Slice(Layout.DuplexMasterOffsetA, Layout.DuplexMasterSize);
|
||||
DuplexMasterBitmapB = MainStorage.Slice(Layout.DuplexMasterOffsetB, Layout.DuplexMasterSize);
|
||||
DataIvfcMaster = MainStorage.Slice(Layout.IvfcMasterHashOffsetA, Layout.IvfcMasterHashSize);
|
||||
FatIvfcMaster = MainStorage.Slice(Layout.FatIvfcMasterHashA, Layout.IvfcMasterHashSize);
|
||||
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
reader.BaseStream.Position = 0;
|
||||
Data = reader.ReadBytes(0x4000);
|
||||
reader.BaseStream.Position = 0;
|
||||
|
||||
Cmac = reader.ReadBytes(0x10);
|
||||
|
||||
reader.BaseStream.Position = 0x100;
|
||||
|
||||
reader.BaseStream.Position = 0x300;
|
||||
Duplex = new DuplexHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x6D8;
|
||||
ExtraData = new ExtraData(reader);
|
||||
|
||||
Ivfc = new IvfcHeader(DataIvfcHeader) { NumLevels = 5 };
|
||||
|
||||
if (Layout.Version >= 0x50000)
|
||||
{
|
||||
FatIvfc = new IvfcHeader(FatIvfcHeader) { NumLevels = 4 };
|
||||
}
|
||||
|
||||
MasterHash = storage.Slice(Layout.IvfcMasterHashOffsetA, Layout.IvfcMasterHashSize);
|
||||
|
||||
HeaderHashValidity = Crypto.CheckMemoryHashTable(Data, Layout.Hash, 0x300, 0x3d00);
|
||||
SignatureValidity = ValidateSignature(keyset);
|
||||
}
|
||||
|
||||
private Validity ValidateSignature(Keyset keyset)
|
||||
{
|
||||
var calculatedCmac = new byte[0x10];
|
||||
|
||||
Crypto.CalculateAesCmac(keyset.SaveMacKey, Data, 0x100, calculatedCmac, 0, 0x200);
|
||||
|
||||
return Util.ArraysEqual(calculatedCmac, Cmac) ? Validity.Valid : Validity.Invalid;
|
||||
}
|
||||
}
|
||||
|
||||
public class FsLayout
|
||||
{
|
||||
public string Magic { get; set; }
|
||||
public uint Version { get; set; }
|
||||
public byte[] Hash { get; set; }
|
||||
public long FileMapEntryOffset { get; set; }
|
||||
public long FileMapEntrySize { get; set; }
|
||||
public long MetaMapEntryOffset { get; set; }
|
||||
public long MetaMapEntrySize { get; set; }
|
||||
public long FileMapDataOffset { get; set; }
|
||||
public long FileMapDataSize { get; set; }
|
||||
public long DuplexL1OffsetA { get; set; }
|
||||
public long DuplexL1OffsetB { get; set; }
|
||||
public long DuplexL1Size { get; set; }
|
||||
public long DuplexDataOffsetA { get; set; }
|
||||
public long DuplexDataOffsetB { get; set; }
|
||||
public long DuplexDataSize { get; set; }
|
||||
public long JournalDataOffset { get; set; }
|
||||
public long JournalDataSizeA { get; set; }
|
||||
public long JournalDataSizeB { get; set; }
|
||||
public long JournalSize { get; set; }
|
||||
public long DuplexMasterOffsetA { get; set; }
|
||||
public long DuplexMasterOffsetB { get; set; }
|
||||
public long DuplexMasterSize { get; set; }
|
||||
public long IvfcMasterHashOffsetA { get; set; }
|
||||
public long IvfcMasterHashOffsetB { get; set; }
|
||||
public long IvfcMasterHashSize { get; set; }
|
||||
public long JournalMapTableOffset { get; set; }
|
||||
public long JournalMapTableSize { get; set; }
|
||||
public long JournalPhysicalBitmapOffset { get; set; }
|
||||
public long JournalPhysicalBitmapSize { get; set; }
|
||||
public long JournalVirtualBitmapOffset { get; set; }
|
||||
public long JournalVirtualBitmapSize { get; set; }
|
||||
public long JournalFreeBitmapOffset { get; set; }
|
||||
public long JournalFreeBitmapSize { get; set; }
|
||||
public long IvfcL1Offset { get; set; }
|
||||
public long IvfcL1Size { get; set; }
|
||||
public long IvfcL2Offset { get; set; }
|
||||
public long IvfcL2Size { get; set; }
|
||||
public long IvfcL3Offset { get; set; }
|
||||
public long IvfcL3Size { get; set; }
|
||||
public long FatOffset { get; set; }
|
||||
public long FatSize { get; set; }
|
||||
public long DuplexIndex { get; set; }
|
||||
public long FatIvfcMasterHashA { get; set; }
|
||||
public long FatIvfcMasterHashB { get; set; }
|
||||
public long FatIvfcL1Offset { get; set; }
|
||||
public long FatIvfcL1Size { get; set; }
|
||||
public long FatIvfcL2Offset { get; set; }
|
||||
public long FatIvfcL2Size { get; set; }
|
||||
|
||||
public FsLayout(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
Version = reader.ReadUInt32();
|
||||
Hash = reader.ReadBytes(0x20);
|
||||
FileMapEntryOffset = reader.ReadInt64();
|
||||
FileMapEntrySize = reader.ReadInt64();
|
||||
MetaMapEntryOffset = reader.ReadInt64();
|
||||
MetaMapEntrySize = reader.ReadInt64();
|
||||
FileMapDataOffset = reader.ReadInt64();
|
||||
FileMapDataSize = reader.ReadInt64();
|
||||
DuplexL1OffsetA = reader.ReadInt64();
|
||||
DuplexL1OffsetB = reader.ReadInt64();
|
||||
DuplexL1Size = reader.ReadInt64();
|
||||
DuplexDataOffsetA = reader.ReadInt64();
|
||||
DuplexDataOffsetB = reader.ReadInt64();
|
||||
DuplexDataSize = reader.ReadInt64();
|
||||
JournalDataOffset = reader.ReadInt64();
|
||||
JournalDataSizeA = reader.ReadInt64();
|
||||
JournalDataSizeB = reader.ReadInt64();
|
||||
JournalSize = reader.ReadInt64();
|
||||
DuplexMasterOffsetA = reader.ReadInt64();
|
||||
DuplexMasterOffsetB = reader.ReadInt64();
|
||||
DuplexMasterSize = reader.ReadInt64();
|
||||
IvfcMasterHashOffsetA = reader.ReadInt64();
|
||||
IvfcMasterHashOffsetB = reader.ReadInt64();
|
||||
IvfcMasterHashSize = reader.ReadInt64();
|
||||
JournalMapTableOffset = reader.ReadInt64();
|
||||
JournalMapTableSize = reader.ReadInt64();
|
||||
JournalPhysicalBitmapOffset = reader.ReadInt64();
|
||||
JournalPhysicalBitmapSize = reader.ReadInt64();
|
||||
JournalVirtualBitmapOffset = reader.ReadInt64();
|
||||
JournalVirtualBitmapSize = reader.ReadInt64();
|
||||
JournalFreeBitmapOffset = reader.ReadInt64();
|
||||
JournalFreeBitmapSize = reader.ReadInt64();
|
||||
IvfcL1Offset = reader.ReadInt64();
|
||||
IvfcL1Size = reader.ReadInt64();
|
||||
IvfcL2Offset = reader.ReadInt64();
|
||||
IvfcL2Size = reader.ReadInt64();
|
||||
IvfcL3Offset = reader.ReadInt64();
|
||||
IvfcL3Size = reader.ReadInt64();
|
||||
FatOffset = reader.ReadInt64();
|
||||
FatSize = reader.ReadInt64();
|
||||
DuplexIndex = reader.ReadByte();
|
||||
|
||||
reader.BaseStream.Position += 7;
|
||||
FatIvfcMasterHashA = reader.ReadInt64();
|
||||
FatIvfcMasterHashB = reader.ReadInt64();
|
||||
FatIvfcL1Offset = reader.ReadInt64();
|
||||
FatIvfcL1Size = reader.ReadInt64();
|
||||
FatIvfcL2Offset = reader.ReadInt64();
|
||||
FatIvfcL2Size = reader.ReadInt64();
|
||||
}
|
||||
}
|
||||
|
||||
public class DuplexHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint Version { get; }
|
||||
public DuplexInfo[] Layers { get; } = new DuplexInfo[3];
|
||||
|
||||
public DuplexHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
Version = reader.ReadUInt32();
|
||||
|
||||
for (int i = 0; i < Layers.Length; i++)
|
||||
{
|
||||
Layers[i] = new DuplexInfo(reader);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class DuplexInfo
|
||||
{
|
||||
public long Offset { get; }
|
||||
public long Length { get; set; }
|
||||
public int BlockSizePower { get; set; }
|
||||
public int BlockSize { get; set; }
|
||||
|
||||
public DuplexInfo() { }
|
||||
|
||||
public DuplexInfo(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
Length = reader.ReadInt64();
|
||||
BlockSizePower = reader.ReadInt32();
|
||||
BlockSize = 1 << BlockSizePower;
|
||||
}
|
||||
}
|
||||
|
||||
public class ExtraData
|
||||
{
|
||||
public ulong TitleId { get; }
|
||||
public Guid UserId { get; }
|
||||
public ulong SaveId { get; }
|
||||
public SaveDataType Type { get; }
|
||||
|
||||
public ulong SaveOwnerId { get; }
|
||||
public long Timestamp { get; }
|
||||
public long Field50 { get; }
|
||||
public uint Field54 { get; }
|
||||
public long DataSize { get; }
|
||||
public long JournalSize { get; }
|
||||
|
||||
public ExtraData(BinaryReader reader)
|
||||
{
|
||||
TitleId = reader.ReadUInt64();
|
||||
UserId = ToGuid(reader.ReadBytes(0x10));
|
||||
SaveId = reader.ReadUInt64();
|
||||
Type = (SaveDataType)reader.ReadByte();
|
||||
reader.BaseStream.Position += 0x1f;
|
||||
|
||||
SaveOwnerId = reader.ReadUInt64();
|
||||
Timestamp = reader.ReadInt64();
|
||||
Field50 = reader.ReadUInt32();
|
||||
Field54 = reader.ReadUInt32();
|
||||
DataSize = reader.ReadInt64();
|
||||
JournalSize = reader.ReadInt64();
|
||||
}
|
||||
|
||||
private static Guid ToGuid(byte[] bytes)
|
||||
{
|
||||
var b = new byte[0x10];
|
||||
Array.Copy(bytes, b, 0x10);
|
||||
|
||||
// The Guid constructor uses a weird, mixed-endian format
|
||||
Array.Reverse(b, 10, 6);
|
||||
|
||||
return new Guid(b);
|
||||
}
|
||||
}
|
||||
|
||||
public enum SaveDataType
|
||||
{
|
||||
SystemSaveData,
|
||||
SaveData,
|
||||
BcatDeliveryCacheStorage,
|
||||
DeviceSaveData,
|
||||
TemporaryStorage,
|
||||
CacheStorage
|
||||
}
|
||||
}
|
58
LibHac/IO/Save/HierarchicalDuplexStorage.cs
Normal file
58
LibHac/IO/Save/HierarchicalDuplexStorage.cs
Normal file
@ -0,0 +1,58 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class HierarchicalDuplexStorage : Storage
|
||||
{
|
||||
private DuplexStorage[] Layers { get; }
|
||||
private DuplexStorage DataLayer { get; }
|
||||
|
||||
public HierarchicalDuplexStorage(DuplexFsLayerInfo[] layers, bool masterBit)
|
||||
{
|
||||
Layers = new DuplexStorage[layers.Length - 1];
|
||||
|
||||
for (int i = 0; i < Layers.Length; i++)
|
||||
{
|
||||
IStorage bitmap;
|
||||
|
||||
if (i == 0)
|
||||
{
|
||||
bitmap = masterBit ? layers[0].DataB : layers[0].DataA;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitmap = Layers[i - 1];
|
||||
}
|
||||
|
||||
Layers[i] = new DuplexStorage(layers[i + 1].DataA, layers[i + 1].DataB, bitmap, layers[i + 1].Info.BlockSize);
|
||||
}
|
||||
|
||||
DataLayer = Layers[Layers.Length - 1];
|
||||
Length = DataLayer.Length;
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
DataLayer.Read(destination, offset);
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
DataLayer.Write(source, offset);
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
DataLayer.Flush();
|
||||
}
|
||||
|
||||
public override long Length { get; }
|
||||
}
|
||||
|
||||
public class DuplexFsLayerInfo
|
||||
{
|
||||
public IStorage DataA { get; set; }
|
||||
public IStorage DataB { get; set; }
|
||||
public DuplexInfo Info { get; set; }
|
||||
}
|
||||
}
|
85
LibHac/IO/Save/JournalMap.cs
Normal file
85
LibHac/IO/Save/JournalMap.cs
Normal file
@ -0,0 +1,85 @@
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class JournalMap
|
||||
{
|
||||
public JournalMapHeader Header { get; }
|
||||
private JournalMapEntry[] Entries { get; }
|
||||
|
||||
private IStorage HeaderStorage { get; }
|
||||
private IStorage MapStorage { get; }
|
||||
private IStorage ModifiedPhysicalBlocks { get; }
|
||||
private IStorage ModifiedVirtualBlocks { get; }
|
||||
private IStorage FreeBlocks { get; }
|
||||
|
||||
public JournalMap(IStorage header, JournalMapParams mapInfo)
|
||||
{
|
||||
HeaderStorage = header;
|
||||
MapStorage = mapInfo.MapStorage;
|
||||
ModifiedPhysicalBlocks = mapInfo.PhysicalBlockBitmap;
|
||||
ModifiedVirtualBlocks = mapInfo.VirtualBlockBitmap;
|
||||
FreeBlocks = mapInfo.FreeBlockBitmap;
|
||||
|
||||
Header = new JournalMapHeader(HeaderStorage);
|
||||
Entries = ReadMapEntries(MapStorage, Header.MainDataBlockCount);
|
||||
}
|
||||
|
||||
public int GetPhysicalBlock(int virtualBlock)
|
||||
{
|
||||
return Entries[virtualBlock].PhysicalIndex;
|
||||
}
|
||||
|
||||
private static JournalMapEntry[] ReadMapEntries(IStorage mapTable, int count)
|
||||
{
|
||||
var tableReader = new BinaryReader(mapTable.AsStream());
|
||||
var map = new JournalMapEntry[count];
|
||||
|
||||
for (int i = 0; i < count; i++)
|
||||
{
|
||||
var entry = new JournalMapEntry
|
||||
{
|
||||
VirtualIndex = i,
|
||||
PhysicalIndex = tableReader.ReadInt32() & 0x7FFFFFFF
|
||||
};
|
||||
|
||||
map[i] = entry;
|
||||
tableReader.BaseStream.Position += 4;
|
||||
}
|
||||
|
||||
return map;
|
||||
}
|
||||
|
||||
public IStorage GetMapStorage() => MapStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetHeaderStorage() => HeaderStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetModifiedPhysicalBlocksStorage() => ModifiedPhysicalBlocks.WithAccess(FileAccess.Read);
|
||||
public IStorage GetModifiedVirtualBlocksStorage() => ModifiedVirtualBlocks.WithAccess(FileAccess.Read);
|
||||
public IStorage GetFreeBlocksStorage() => FreeBlocks.WithAccess(FileAccess.Read);
|
||||
}
|
||||
|
||||
public class JournalMapHeader
|
||||
{
|
||||
public int Version { get; }
|
||||
public int MainDataBlockCount { get; }
|
||||
public int JournalBlockCount { get; }
|
||||
public int FieldC { get; }
|
||||
|
||||
public JournalMapHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Version = reader.ReadInt32();
|
||||
MainDataBlockCount = reader.ReadInt32();
|
||||
JournalBlockCount = reader.ReadInt32();
|
||||
FieldC = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class JournalMapParams
|
||||
{
|
||||
public IStorage MapStorage { get; set; }
|
||||
public IStorage PhysicalBlockBitmap { get; set; }
|
||||
public IStorage VirtualBlockBitmap { get; set; }
|
||||
public IStorage FreeBlockBitmap { get; set; }
|
||||
}
|
||||
}
|
112
LibHac/IO/Save/JournalStorage.cs
Normal file
112
LibHac/IO/Save/JournalStorage.cs
Normal file
@ -0,0 +1,112 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class JournalStorage : Storage
|
||||
{
|
||||
private IStorage BaseStorage { get; }
|
||||
private IStorage HeaderStorage { get; }
|
||||
public JournalMap Map { get; }
|
||||
|
||||
public JournalHeader Header { get; }
|
||||
|
||||
public int BlockSize { get; }
|
||||
public override long Length { get; }
|
||||
|
||||
public JournalStorage(IStorage baseStorage, IStorage header, JournalMapParams mapInfo, bool leaveOpen)
|
||||
{
|
||||
BaseStorage = baseStorage;
|
||||
HeaderStorage = header;
|
||||
Header = new JournalHeader(HeaderStorage);
|
||||
|
||||
IStorage mapHeader = header.Slice(0x20, 0x10);
|
||||
Map = new JournalMap(mapHeader, mapInfo);
|
||||
|
||||
BlockSize = (int)Header.BlockSize;
|
||||
Length = Header.TotalSize - Header.JournalSize;
|
||||
|
||||
if (!leaveOpen) ToDispose.Add(baseStorage);
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
int blockPos = (int)(inPos % BlockSize);
|
||||
|
||||
long physicalOffset = Map.GetPhysicalBlock(blockNum) * BlockSize + blockPos;
|
||||
|
||||
int bytesToRead = Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
BaseStorage.Read(destination.Slice(outPos, bytesToRead), physicalOffset);
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = source.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(inPos / BlockSize);
|
||||
int blockPos = (int)(inPos % BlockSize);
|
||||
|
||||
long physicalOffset = Map.GetPhysicalBlock(blockNum) * BlockSize + blockPos;
|
||||
|
||||
int bytesToWrite = Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
BaseStorage.Write(source.Slice(outPos, bytesToWrite), physicalOffset);
|
||||
|
||||
outPos += bytesToWrite;
|
||||
inPos += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BaseStorage.Flush();
|
||||
}
|
||||
|
||||
public IStorage GetBaseStorage() => BaseStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetHeaderStorage() => HeaderStorage.WithAccess(FileAccess.Read);
|
||||
}
|
||||
|
||||
public class JournalHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint Version { get; }
|
||||
public long TotalSize { get; }
|
||||
public long JournalSize { get; }
|
||||
public long BlockSize { get; }
|
||||
|
||||
public JournalHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
Version = reader.ReadUInt32();
|
||||
TotalSize = reader.ReadInt64();
|
||||
JournalSize = reader.ReadInt64();
|
||||
BlockSize = reader.ReadInt64();
|
||||
}
|
||||
}
|
||||
|
||||
public class JournalMapEntry
|
||||
{
|
||||
public int PhysicalIndex { get; set; }
|
||||
public int VirtualIndex { get; set; }
|
||||
}
|
||||
}
|
232
LibHac/IO/Save/RemapStorage.cs
Normal file
232
LibHac/IO/Save/RemapStorage.cs
Normal file
@ -0,0 +1,232 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class RemapStorage : Storage
|
||||
{
|
||||
private IStorage BaseStorage { get; }
|
||||
private IStorage HeaderStorage { get; }
|
||||
private IStorage MapEntryStorage { get; }
|
||||
|
||||
private RemapHeader Header { get; }
|
||||
public MapEntry[] MapEntries { get; set; }
|
||||
public RemapSegment[] Segments { get; set; }
|
||||
|
||||
public override long Length { get; } = -1;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="RemapStorage"/>
|
||||
/// </summary>
|
||||
/// <param name="storage">A <see cref="IStorage"/> of the main data of the RemapStream.
|
||||
/// The <see cref="RemapStorage"/> object assumes complete ownership of the Storage.</param>
|
||||
/// <param name="header">The header for this RemapStorage.</param>
|
||||
/// <param name="mapEntries">The remapping entries for this RemapStorage.</param>
|
||||
/// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="RemapStorage"/> object is disposed; otherwise, <see langword="false"/>.</param>
|
||||
public RemapStorage(IStorage storage, IStorage header, IStorage mapEntries, bool leaveOpen)
|
||||
{
|
||||
BaseStorage = storage;
|
||||
HeaderStorage = header;
|
||||
MapEntryStorage = mapEntries;
|
||||
|
||||
Header = new RemapHeader(HeaderStorage);
|
||||
|
||||
MapEntries = new MapEntry[Header.MapEntryCount];
|
||||
var reader = new BinaryReader(MapEntryStorage.AsStream());
|
||||
|
||||
for (int i = 0; i < Header.MapEntryCount; i++)
|
||||
{
|
||||
MapEntries[i] = new MapEntry(reader);
|
||||
}
|
||||
|
||||
if (!leaveOpen) ToDispose.Add(BaseStorage);
|
||||
|
||||
Segments = InitSegments(Header, MapEntries);
|
||||
}
|
||||
|
||||
protected override void ReadImpl(Span<byte> destination, long offset)
|
||||
{
|
||||
MapEntry entry = GetMapEntry(offset);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = destination.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
long entryPos = inPos - entry.VirtualOffset;
|
||||
|
||||
int bytesToRead = (int)Math.Min(entry.VirtualOffsetEnd - inPos, remaining);
|
||||
BaseStorage.Read(destination.Slice(outPos, bytesToRead), entry.PhysicalOffset + entryPos);
|
||||
|
||||
outPos += bytesToRead;
|
||||
inPos += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
|
||||
if (inPos >= entry.VirtualOffsetEnd)
|
||||
{
|
||||
entry = entry.Next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
|
||||
{
|
||||
MapEntry entry = GetMapEntry(offset);
|
||||
|
||||
long inPos = offset;
|
||||
int outPos = 0;
|
||||
int remaining = source.Length;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
long entryPos = inPos - entry.VirtualOffset;
|
||||
|
||||
int bytesToWrite = (int)Math.Min(entry.VirtualOffsetEnd - inPos, remaining);
|
||||
BaseStorage.Write(source.Slice(outPos, bytesToWrite), entry.PhysicalOffset + entryPos);
|
||||
|
||||
outPos += bytesToWrite;
|
||||
inPos += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
|
||||
if (inPos >= entry.VirtualOffsetEnd)
|
||||
{
|
||||
entry = entry.Next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BaseStorage.Flush();
|
||||
}
|
||||
|
||||
public IStorage GetBaseStorage() => BaseStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetHeaderStorage() => HeaderStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetMapEntryStorage() => MapEntryStorage.WithAccess(FileAccess.Read);
|
||||
|
||||
private static RemapSegment[] InitSegments(RemapHeader header, MapEntry[] mapEntries)
|
||||
{
|
||||
var segments = new RemapSegment[header.MapSegmentCount];
|
||||
int entryIdx = 0;
|
||||
|
||||
for (int i = 0; i < header.MapSegmentCount; i++)
|
||||
{
|
||||
var seg = new RemapSegment();
|
||||
seg.Entries.Add(mapEntries[entryIdx]);
|
||||
seg.Offset = mapEntries[entryIdx].VirtualOffset;
|
||||
mapEntries[entryIdx].Segment = seg;
|
||||
entryIdx++;
|
||||
|
||||
while (entryIdx < mapEntries.Length &&
|
||||
mapEntries[entryIdx - 1].VirtualOffsetEnd == mapEntries[entryIdx].VirtualOffset)
|
||||
{
|
||||
mapEntries[entryIdx].Segment = seg;
|
||||
mapEntries[entryIdx - 1].Next = mapEntries[entryIdx];
|
||||
seg.Entries.Add(mapEntries[entryIdx]);
|
||||
entryIdx++;
|
||||
}
|
||||
|
||||
seg.Length = seg.Entries[seg.Entries.Count - 1].VirtualOffsetEnd - seg.Entries[0].VirtualOffset;
|
||||
segments[i] = seg;
|
||||
}
|
||||
|
||||
return segments;
|
||||
}
|
||||
|
||||
private MapEntry GetMapEntry(long offset)
|
||||
{
|
||||
int segmentIdx = GetSegmentFromVirtualOffset(offset);
|
||||
|
||||
if (segmentIdx < Segments.Length)
|
||||
{
|
||||
RemapSegment segment = Segments[segmentIdx];
|
||||
|
||||
foreach (MapEntry entry in segment.Entries)
|
||||
{
|
||||
if (entry.VirtualOffsetEnd > offset) return entry;
|
||||
}
|
||||
}
|
||||
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
public int GetSegmentFromVirtualOffset(long virtualOffset)
|
||||
{
|
||||
return (int)((ulong)virtualOffset >> (64 - Header.SegmentBits));
|
||||
}
|
||||
|
||||
public long GetOffsetFromVirtualOffset(long virtualOffset)
|
||||
{
|
||||
return virtualOffset & GetOffsetMask();
|
||||
}
|
||||
|
||||
public long ToVirtualOffset(int segment, long offset)
|
||||
{
|
||||
long seg = (segment << (64 - Header.SegmentBits)) & GetSegmentMask();
|
||||
long off = offset & GetOffsetMask();
|
||||
return seg | off;
|
||||
}
|
||||
|
||||
private long GetOffsetMask()
|
||||
{
|
||||
return (1 << (64 - Header.SegmentBits)) - 1;
|
||||
}
|
||||
|
||||
private long GetSegmentMask()
|
||||
{
|
||||
return ~GetOffsetMask();
|
||||
}
|
||||
}
|
||||
|
||||
public class RemapHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint Verison { get; }
|
||||
public int MapEntryCount { get; }
|
||||
public int MapSegmentCount { get; }
|
||||
public int SegmentBits { get; }
|
||||
|
||||
public RemapHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
Verison = reader.ReadUInt32();
|
||||
MapEntryCount = reader.ReadInt32();
|
||||
MapSegmentCount = reader.ReadInt32();
|
||||
SegmentBits = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class MapEntry
|
||||
{
|
||||
public long VirtualOffset { get; }
|
||||
public long PhysicalOffset { get; }
|
||||
public long Size { get; }
|
||||
public int Alignment { get; }
|
||||
public int Field1C { get; }
|
||||
|
||||
public long VirtualOffsetEnd => VirtualOffset + Size;
|
||||
public long PhysicalOffsetEnd => PhysicalOffset + Size;
|
||||
internal RemapSegment Segment { get; set; }
|
||||
internal MapEntry Next { get; set; }
|
||||
|
||||
public MapEntry(BinaryReader reader)
|
||||
{
|
||||
VirtualOffset = reader.ReadInt64();
|
||||
PhysicalOffset = reader.ReadInt64();
|
||||
Size = reader.ReadInt64();
|
||||
Alignment = reader.ReadInt32();
|
||||
Field1C = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class RemapSegment
|
||||
{
|
||||
public List<MapEntry> Entries { get; } = new List<MapEntry>();
|
||||
public long Offset { get; internal set; }
|
||||
public long Length { get; internal set; }
|
||||
}
|
||||
}
|
@ -1,13 +1,14 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.Save
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class SaveFs
|
||||
{
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private AllocationTable AllocationTable { get; }
|
||||
private IStorage BaseStorage { get; }
|
||||
private IStorage HeaderStorage { get; }
|
||||
|
||||
public AllocationTable AllocationTable { get; }
|
||||
private SaveHeader Header { get; }
|
||||
|
||||
public DirectoryEntry RootDirectory { get; private set; }
|
||||
@ -15,11 +16,13 @@ namespace LibHac.Save
|
||||
public DirectoryEntry[] Directories { get; private set; }
|
||||
public Dictionary<string, FileEntry> FileDictionary { get; }
|
||||
|
||||
public SaveFs(Stream storage, Stream allocationTable, SaveHeader header)
|
||||
public SaveFs(IStorage storage, IStorage allocationTable, IStorage header)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(storage);
|
||||
AllocationTable = new AllocationTable(allocationTable);
|
||||
Header = header;
|
||||
HeaderStorage = header;
|
||||
BaseStorage = storage;
|
||||
AllocationTable = new AllocationTable(allocationTable, header.Slice(0x18, 0x30));
|
||||
|
||||
Header = new SaveHeader(HeaderStorage);
|
||||
|
||||
ReadFileInfo();
|
||||
var dictionary = new Dictionary<string, FileEntry>();
|
||||
@ -31,7 +34,7 @@ namespace LibHac.Save
|
||||
FileDictionary = dictionary;
|
||||
}
|
||||
|
||||
public Stream OpenFile(string filename)
|
||||
public IStorage OpenFile(string filename)
|
||||
{
|
||||
if (!FileDictionary.TryGetValue(filename, out FileEntry file))
|
||||
{
|
||||
@ -41,11 +44,12 @@ namespace LibHac.Save
|
||||
return OpenFile(file);
|
||||
}
|
||||
|
||||
public Stream OpenFile(FileEntry file)
|
||||
public IStorage OpenFile(FileEntry file)
|
||||
{
|
||||
if (file.BlockIndex < 0)
|
||||
{
|
||||
return Stream.Null;
|
||||
// todo
|
||||
return new MemoryStorage(new byte[0]);
|
||||
}
|
||||
|
||||
return OpenFatBlock(file.BlockIndex, file.FileSize);
|
||||
@ -53,13 +57,14 @@ namespace LibHac.Save
|
||||
|
||||
public bool FileExists(string filename) => FileDictionary.ContainsKey(filename);
|
||||
|
||||
public Stream OpenRawSaveFs() => StreamSource.CreateStream();
|
||||
public IStorage GetBaseStorage() => BaseStorage.WithAccess(FileAccess.Read);
|
||||
public IStorage GetHeaderStorage() => HeaderStorage.WithAccess(FileAccess.Read);
|
||||
|
||||
private void ReadFileInfo()
|
||||
{
|
||||
// todo: Query the FAT for the file size when none is given
|
||||
AllocationTableStream dirTableStream = OpenFatBlock(Header.DirectoryTableBlock, 1000000);
|
||||
AllocationTableStream fileTableStream = OpenFatBlock(Header.FileTableBlock, 1000000);
|
||||
AllocationTableStorage dirTableStream = OpenFatBlock(AllocationTable.Header.DirectoryTableBlock, 1000000);
|
||||
AllocationTableStorage fileTableStream = OpenFatBlock(AllocationTable.Header.FileTableBlock, 1000000);
|
||||
|
||||
DirectoryEntry[] dirEntries = ReadDirEntries(dirTableStream);
|
||||
FileEntry[] fileEntries = ReadFileEntries(fileTableStream);
|
||||
@ -107,9 +112,9 @@ namespace LibHac.Save
|
||||
FsEntry.ResolveFilenames(Directories);
|
||||
}
|
||||
|
||||
private FileEntry[] ReadFileEntries(Stream stream)
|
||||
private FileEntry[] ReadFileEntries(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
int count = reader.ReadInt32();
|
||||
|
||||
reader.BaseStream.Position -= 4;
|
||||
@ -123,9 +128,9 @@ namespace LibHac.Save
|
||||
return entries;
|
||||
}
|
||||
|
||||
private DirectoryEntry[] ReadDirEntries(Stream stream)
|
||||
private DirectoryEntry[] ReadDirEntries(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
int count = reader.ReadInt32();
|
||||
|
||||
reader.BaseStream.Position -= 4;
|
||||
@ -139,9 +144,28 @@ namespace LibHac.Save
|
||||
return entries;
|
||||
}
|
||||
|
||||
private AllocationTableStream OpenFatBlock(int blockIndex, long size)
|
||||
private AllocationTableStorage OpenFatBlock(int blockIndex, long size)
|
||||
{
|
||||
return new AllocationTableStream(StreamSource.CreateStream(), AllocationTable, (int)Header.BlockSize, blockIndex, size);
|
||||
return new AllocationTableStorage(BaseStorage, AllocationTable, (int)Header.BlockSize, blockIndex, size);
|
||||
}
|
||||
}
|
||||
|
||||
public class SaveHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint Version { get; }
|
||||
public long BlockCount { get; }
|
||||
public long BlockSize { get; }
|
||||
|
||||
|
||||
public SaveHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
Version = reader.ReadUInt32();
|
||||
BlockCount = reader.ReadInt64();
|
||||
BlockSize = reader.ReadInt64();
|
||||
}
|
||||
}
|
||||
}
|
207
LibHac/IO/Save/Savefile.cs
Normal file
207
LibHac/IO/Save/Savefile.cs
Normal file
@ -0,0 +1,207 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO.Save
|
||||
{
|
||||
public class Savefile : IDisposable
|
||||
{
|
||||
public Header Header { get; }
|
||||
public IStorage BaseStorage { get; }
|
||||
public bool LeaveOpen { get; }
|
||||
|
||||
public HierarchicalIntegrityVerificationStorage IvfcStorage { get; }
|
||||
public SaveFs SaveFs { get; }
|
||||
|
||||
public RemapStorage DataRemapStorage { get; }
|
||||
public RemapStorage MetaRemapStorage { get; }
|
||||
|
||||
public HierarchicalDuplexStorage DuplexStorage { get; }
|
||||
public JournalStorage JournalStorage { get; }
|
||||
|
||||
public DirectoryEntry RootDirectory => SaveFs.RootDirectory;
|
||||
public FileEntry[] Files => SaveFs.Files;
|
||||
public DirectoryEntry[] Directories => SaveFs.Directories;
|
||||
|
||||
public Savefile(Keyset keyset, IStorage storage, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
|
||||
{
|
||||
BaseStorage = storage;
|
||||
LeaveOpen = leaveOpen;
|
||||
|
||||
Header = new Header(keyset, BaseStorage);
|
||||
FsLayout layout = Header.Layout;
|
||||
|
||||
IStorage dataRemapBase = BaseStorage.Slice(layout.FileMapDataOffset, layout.FileMapDataSize);
|
||||
IStorage dataRemapEntries = BaseStorage.Slice(layout.FileMapEntryOffset, layout.FileMapEntrySize);
|
||||
IStorage metadataRemapEntries = BaseStorage.Slice(layout.MetaMapEntryOffset, layout.MetaMapEntrySize);
|
||||
|
||||
DataRemapStorage = new RemapStorage(dataRemapBase, Header.MainRemapHeader, dataRemapEntries, leaveOpen);
|
||||
|
||||
DuplexStorage = InitDuplexStorage(DataRemapStorage, Header);
|
||||
|
||||
MetaRemapStorage = new RemapStorage(DuplexStorage, Header.MetaDataRemapHeader, metadataRemapEntries, leaveOpen);
|
||||
|
||||
var journalMapInfo = new JournalMapParams
|
||||
{
|
||||
MapStorage = MetaRemapStorage.Slice(layout.JournalMapTableOffset, layout.JournalMapTableSize),
|
||||
PhysicalBlockBitmap = MetaRemapStorage.Slice(layout.JournalPhysicalBitmapOffset, layout.JournalPhysicalBitmapSize),
|
||||
VirtualBlockBitmap = MetaRemapStorage.Slice(layout.JournalVirtualBitmapOffset, layout.JournalVirtualBitmapSize),
|
||||
FreeBlockBitmap = MetaRemapStorage.Slice(layout.JournalFreeBitmapOffset, layout.JournalFreeBitmapSize),
|
||||
};
|
||||
|
||||
IStorage journalData = DataRemapStorage.Slice(layout.JournalDataOffset,
|
||||
layout.JournalDataSizeB + layout.JournalSize);
|
||||
|
||||
JournalStorage = new JournalStorage(journalData, Header.JournalHeader, journalMapInfo, leaveOpen);
|
||||
|
||||
IvfcStorage = InitJournalIvfcStorage(integrityCheckLevel);
|
||||
|
||||
IStorage fatStorage = MetaRemapStorage.Slice(layout.FatOffset, layout.FatSize);
|
||||
|
||||
if (Header.Layout.Version >= 0x50000)
|
||||
{
|
||||
fatStorage = InitFatIvfcStorage(integrityCheckLevel);
|
||||
}
|
||||
|
||||
SaveFs = new SaveFs(IvfcStorage, fatStorage, Header.SaveHeader);
|
||||
}
|
||||
|
||||
private static HierarchicalDuplexStorage InitDuplexStorage(IStorage baseStorage, Header header)
|
||||
{
|
||||
FsLayout layout = header.Layout;
|
||||
var duplexLayers = new DuplexFsLayerInfo[3];
|
||||
|
||||
duplexLayers[0] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = header.DuplexMasterBitmapA,
|
||||
DataB = header.DuplexMasterBitmapB,
|
||||
Info = header.Duplex.Layers[0]
|
||||
};
|
||||
|
||||
duplexLayers[1] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = baseStorage.Slice(layout.DuplexL1OffsetA, layout.DuplexL1Size),
|
||||
DataB = baseStorage.Slice(layout.DuplexL1OffsetB, layout.DuplexL1Size),
|
||||
Info = header.Duplex.Layers[1]
|
||||
};
|
||||
|
||||
duplexLayers[2] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = baseStorage.Slice(layout.DuplexDataOffsetA, layout.DuplexDataSize),
|
||||
DataB = baseStorage.Slice(layout.DuplexDataOffsetB, layout.DuplexDataSize),
|
||||
Info = header.Duplex.Layers[2]
|
||||
};
|
||||
|
||||
return new HierarchicalDuplexStorage(duplexLayers, layout.DuplexIndex == 1);
|
||||
}
|
||||
|
||||
/// <summary>
/// Assembles the 5-level IVFC hierarchy protecting the journal data: the
/// master hash from the header, intermediate hash levels from meta-remapped
/// storage, and the journal itself as the final data level.
/// </summary>
private HierarchicalIntegrityVerificationStorage InitJournalIvfcStorage(IntegrityCheckLevel integrityCheckLevel)
{
    const int ivfcLevels = 5;
    IvfcHeader ivfc = Header.Ivfc;

    var levels = new List<IStorage> { Header.DataIvfcMaster };

    // Intermediate hash levels (all but the master and the data level).
    for (int levelIndex = 0; levelIndex < ivfcLevels - 2; levelIndex++)
    {
        IvfcLevelHeader levelHeader = ivfc.LevelHeaders[levelIndex];
        levels.Add(MetaRemapStorage.Slice(levelHeader.Offset, levelHeader.Size));
    }

    // The final level is the actual journal data being verified.
    IvfcLevelHeader dataLevelHeader = ivfc.LevelHeaders[ivfcLevels - 2];
    levels.Add(JournalStorage.Slice(dataLevelHeader.Offset, dataLevelHeader.Size));

    return new HierarchicalIntegrityVerificationStorage(ivfc, levels, IntegrityStorageType.Save, integrityCheckLevel, LeaveOpen);
}
|
||||
|
||||
/// <summary>
/// Builds the integrity-verification storage protecting the allocation table.
/// Used for saves with Layout.Version >= 0x50000, which hash the FAT.
/// </summary>
private HierarchicalIntegrityVerificationStorage InitFatIvfcStorage(IntegrityCheckLevel integrityCheckLevel)
{
    return new HierarchicalIntegrityVerificationStorage(Header.FatIvfc, Header.FatIvfcMaster, MetaRemapStorage,
        IntegrityStorageType.Save, integrityCheckLevel, LeaveOpen);
}
|
||||
|
||||
/// <summary>Opens a file in the save data by its full path.</summary>
public IStorage OpenFile(string filename)
{
    return SaveFs.OpenFile(filename);
}
|
||||
|
||||
/// <summary>Opens a file in the save data from an existing file entry.</summary>
public IStorage OpenFile(FileEntry file)
{
    return SaveFs.OpenFile(file);
}
|
||||
|
||||
/// <summary>Checks whether a file with the given path exists in the save data.</summary>
public bool FileExists(string filename) => SaveFs.FileExists(filename);
|
||||
|
||||
/// <summary>
/// Recomputes the header SHA-256 hash and, when the save MAC key is present,
/// the header AES-CMAC, writing both back into the base storage.
/// </summary>
/// <param name="keyset">Keyset supplying <c>SaveMacKey</c> used to sign the header.</param>
/// <returns><c>true</c> if the CMAC was written; <c>false</c> if the MAC key was
/// empty (the hash has already been updated in that case).</returns>
public bool CommitHeader(Keyset keyset)
{
    // todo
    Stream headerStream = BaseStorage.AsStream();

    // The hash covers 0x3d00 bytes starting at offset 0x300 and is stored at 0x108.
    var hashData = new byte[0x3d00];

    headerStream.Position = 0x300;
    headerStream.Read(hashData, 0, hashData.Length);

    byte[] hash = Crypto.ComputeSha256(hashData, 0, hashData.Length);
    headerStream.Position = 0x108;
    headerStream.Write(hash, 0, hash.Length);

    // Without the MAC key only the hash can be updated.
    if (keyset.SaveMacKey.IsEmpty()) return false;

    // The CMAC covers 0x200 bytes starting at 0x100 (including the hash just
    // written) and is stored at the very start of the header.
    var cmacData = new byte[0x200];
    var cmac = new byte[0x10];

    headerStream.Position = 0x100;
    headerStream.Read(cmacData, 0, 0x200);

    Crypto.CalculateAesCmac(keyset.SaveMacKey, cmacData, 0, cmac, 0, 0x200);

    headerStream.Position = 0;
    headerStream.Write(cmac, 0, 0x10);
    headerStream.Flush();

    return true;
}
|
||||
|
||||
/// <summary>
/// Validates the full journal IVFC hierarchy and records the per-level
/// validities back into the header's IVFC information.
/// </summary>
/// <param name="logger">Optional progress/logging sink.</param>
public Validity Verify(IProgressReport logger = null)
{
    Validity validity = IvfcStorage.Validate(true, logger);
    IvfcStorage.SetLevelValidities(Header.Ivfc);

    return validity;
}
|
||||
|
||||
/// <summary>
/// Disposes the base storage unless the save file was opened with leaveOpen.
/// </summary>
protected virtual void Dispose(bool disposing)
{
    if (disposing && !LeaveOpen)
    {
        BaseStorage?.Dispose();
    }
}
|
||||
|
||||
/// <summary>Standard dispose pattern entry point; see <see cref="Dispose(bool)"/>.</summary>
public void Dispose()
{
    Dispose(true);
    GC.SuppressFinalize(this);
}
|
||||
}
|
||||
|
||||
public static class SavefileExtensions
{
    /// <summary>
    /// Writes every file in the save data to <paramref name="outDir"/>,
    /// preserving the in-save directory structure.
    /// </summary>
    public static void Extract(this Savefile save, string outDir, IProgressReport logger = null)
    {
        foreach (FileEntry entry in save.Files)
        {
            // FullPath starts with the in-save separator, so plain
            // concatenation roots it under the output directory.
            string destPath = outDir + entry.FullPath;
            string destDir = Path.GetDirectoryName(destPath);
            if (!string.IsNullOrWhiteSpace(destDir)) Directory.CreateDirectory(destDir);

            IStorage source = save.OpenFile(entry);
            using (var destFile = new FileStream(destPath, FileMode.Create, FileAccess.ReadWrite))
            {
                logger?.LogMessage(entry.FullPath);
                source.CopyToStream(destFile, source.Length, logger);
            }
        }
    }
}
|
||||
}
|
54
LibHac/IO/SectorStorage.cs
Normal file
54
LibHac/IO/SectorStorage.cs
Normal file
@ -0,0 +1,54 @@
|
||||
using System;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// A storage wrapper that divides its base storage into fixed-size sectors
/// and requires all accesses to be sector-aligned.
/// </summary>
public class SectorStorage : Storage
{
    protected IStorage BaseStorage { get; }

    // Size of a single sector in bytes.
    public int SectorSize { get; }
    // Number of sectors, counting a partial trailing sector as a full one.
    public int SectorCount { get; }

    public SectorStorage(IStorage baseStorage, int sectorSize, bool leaveOpen)
    {
        BaseStorage = baseStorage;
        SectorSize = sectorSize;
        SectorCount = (int)Util.DivideByRoundUp(BaseStorage.Length, sectorSize);
        Length = baseStorage.Length;

        // Take ownership of the base storage unless asked to leave it open.
        if (!leaveOpen) ToDispose.Add(BaseStorage);
    }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
        ValidateSize(destination.Length, offset);
        BaseStorage.Read(destination, offset);
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
        ValidateSize(source.Length, offset);
        BaseStorage.Write(source, offset);
    }

    public override void Flush()
    {
        BaseStorage.Flush();
    }

    public override long Length { get; }

    /// <summary>
    /// Validates that the access is non-negative and sector-aligned.
    /// </summary>
    /// <remarks>
    /// NOTE(review): despite the method name, only the offset's alignment is
    /// checked; <paramref name="size"/> is not required to be a multiple of
    /// the sector size here — confirm whether that is intended.
    /// </remarks>
    protected void ValidateSize(long size, long offset)
    {
        if (size < 0)
            throw new ArgumentException("Size must be non-negative");
        if (offset < 0)
            throw new ArgumentException("Offset must be non-negative");
        if (offset % SectorSize != 0)
            throw new ArgumentException($"Offset must be a multiple of {SectorSize}");
    }
}
|
||||
}
|
114
LibHac/IO/Storage.cs
Normal file
114
LibHac/IO/Storage.cs
Normal file
@ -0,0 +1,114 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Base class for <see cref="IStorage"/> implementations. Provides read/write
/// access enforcement, argument validation, and cascading disposal of any
/// resources registered in <see cref="ToDispose"/>.
/// </summary>
public abstract class Storage : IStorage
{
    private bool _isDisposed;

    // Resources owned by this storage; disposed together with this instance.
    protected internal List<IDisposable> ToDispose { get; } = new List<IDisposable>();

    // Implementations perform the actual transfer; arguments arrive pre-validated.
    protected abstract void ReadImpl(Span<byte> destination, long offset);
    protected abstract void WriteImpl(ReadOnlySpan<byte> source, long offset);
    public abstract void Flush();

    // Length in bytes; -1 acts as an "unknown length" sentinel
    // (see the Length != -1 guards in the validation methods below).
    public abstract long Length { get; }

    protected FileAccess Access { get; set; } = FileAccess.ReadWrite;

    /// <summary>Fills <paramref name="destination"/> with data starting at <paramref name="offset"/>.</summary>
    public void Read(Span<byte> destination, long offset)
    {
        EnsureCanRead();
        ValidateSpanParameters(destination, offset);
        ReadImpl(destination, offset);
    }

    /// <summary>
    /// Array-based read: copies <paramref name="count"/> bytes from storage
    /// offset <paramref name="offset"/> into <paramref name="buffer"/> at
    /// <paramref name="bufferOffset"/>.
    /// </summary>
    public virtual void Read(byte[] buffer, long offset, int count, int bufferOffset)
    {
        ValidateArrayParameters(buffer, offset, count, bufferOffset);
        Read(buffer.AsSpan(bufferOffset, count), offset);
    }

    /// <summary>Writes all of <paramref name="source"/> starting at <paramref name="offset"/>.</summary>
    public void Write(ReadOnlySpan<byte> source, long offset)
    {
        EnsureCanWrite();
        ValidateSpanParameters(source, offset);
        WriteImpl(source, offset);
    }

    /// <summary>Array-based write counterpart of the span overload.</summary>
    public virtual void Write(byte[] buffer, long offset, int count, int bufferOffset)
    {
        ValidateArrayParameters(buffer, offset, count, bufferOffset);
        Write(buffer.AsSpan(bufferOffset, count), offset);
    }

    /// <summary>Returns a storage exposing the region [start, start + length) of this one.</summary>
    public virtual Storage Slice(long start, long length, bool leaveOpen)
    {
        return new SubStorage(this, start, length, leaveOpen);
    }

    protected virtual void Dispose(bool disposing)
    {
        if (_isDisposed) return;

        if (disposing)
        {
            // Flush pending data before cascading disposal to owned resources.
            Flush();
            foreach (IDisposable item in ToDispose)
            {
                item?.Dispose();
            }
        }

        _isDisposed = true;
    }

    /// <summary>Permanently downgrades this storage to read-only access.</summary>
    public void SetReadOnly() => Access = FileAccess.Read;

    public virtual bool CanRead => (Access & FileAccess.Read) != 0;
    public virtual bool CanWrite => (Access & FileAccess.Write) != 0;

    private void EnsureCanRead()
    {
        if (!CanRead) throw new InvalidOperationException("Storage is not readable");
    }

    private void EnsureCanWrite()
    {
        if (!CanWrite) throw new InvalidOperationException("Storage is not writable");
    }

    public void Dispose()
    {
        Dispose(true);
        GC.SuppressFinalize(this);
    }

    /// <summary>Validates the array-overload arguments and the storage bounds.</summary>
    protected void ValidateArrayParameters(byte[] buffer, long offset, int count, int bufferOffset)
    {
        if (_isDisposed) throw new ObjectDisposedException(null);
        if (buffer == null) throw new ArgumentNullException(nameof(buffer));
        if (offset < 0) throw new ArgumentOutOfRangeException(nameof(offset), "Argument must be non-negative.");
        if (count < 0) throw new ArgumentOutOfRangeException(nameof(count), "Argument must be non-negative.");
        if (bufferOffset < 0) throw new ArgumentOutOfRangeException(nameof(bufferOffset), "Argument must be non-negative.");
        if (buffer.Length - bufferOffset < count) throw new ArgumentException("bufferOffset, length, and count were out of bounds for the array.");

        // A -1 length means the bounds cannot be checked against the storage size.
        if (Length != -1)
        {
            // NOTE(review): offset + count can overflow for extreme values — confirm callers stay in range.
            if (offset + count > Length) throw new ArgumentException();
        }
    }

    /// <summary>Validates the span-overload arguments and the storage bounds.</summary>
    protected void ValidateSpanParameters(ReadOnlySpan<byte> destination, long offset)
    {
        if (_isDisposed) throw new ObjectDisposedException(null);
        // A null literal converts to a default (empty) span, so this guards default spans.
        if (destination == null) throw new ArgumentNullException(nameof(destination));
        if (offset < 0) throw new ArgumentOutOfRangeException(nameof(offset), "Argument must be non-negative.");

        if (Length != -1)
        {
            if (offset + destination.Length > Length) throw new ArgumentException("Storage");
        }
    }
}
|
||||
}
|
127
LibHac/IO/StorageExtensions.cs
Normal file
127
LibHac/IO/StorageExtensions.cs
Normal file
@ -0,0 +1,127 @@
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Helpers for slicing storages, adjusting their access level, bridging
/// between <see cref="IStorage"/> and <see cref="Stream"/>, and bulk copying.
/// </summary>
public static class StorageExtensions
{
    /// <summary>Slices from <paramref name="start"/> to the end of the storage.</summary>
    public static Storage Slice(this IStorage storage, long start)
    {
        if (storage.Length == -1)
        {
            // Length is unknown; the -1 sentinel is propagated as the slice length.
            return storage.Slice(start, storage.Length);
        }

        return storage.Slice(start, storage.Length - start);
    }

    /// <summary>Slices a region, leaving the underlying storage open.</summary>
    public static Storage Slice(this IStorage storage, long start, long length)
    {
        return storage.Slice(start, length, true);
    }

    public static Storage Slice(this IStorage storage, long start, long length, bool leaveOpen)
    {
        // Storage subclasses may implement slicing themselves (e.g. collapsing
        // nested sub-storages); fall back to a plain SubStorage otherwise.
        if (storage is Storage s)
        {
            return s.Slice(start, length, leaveOpen);
        }

        return new SubStorage(storage, start, length, leaveOpen);
    }

    /// <summary>Wraps the storage with a different access level (e.g. read-only), leaving it open.</summary>
    public static Storage WithAccess(this IStorage storage, FileAccess access)
    {
        return storage.WithAccess(access, true);
    }

    public static Storage WithAccess(this IStorage storage, FileAccess access, bool leaveOpen)
    {
        return new SubStorage(storage, 0, storage.Length, leaveOpen, access);
    }

    /// <summary>Exposes the storage as a seekable <see cref="Stream"/>; the storage is left open.</summary>
    public static Stream AsStream(this IStorage storage) => new StorageStream(storage, true);

    /// <summary>
    /// Copies as many bytes as both storages can hold from input to output.
    /// Both storages must have a known length.
    /// </summary>
    public static void CopyTo(this IStorage input, IStorage output, IProgressReport progress = null)
    {
        const int bufferSize = 81920;
        long remaining = Math.Min(input.Length, output.Length);
        if (remaining < 0) throw new ArgumentException("Storage must have an explicit length");
        progress?.SetTotal(remaining);

        long pos = 0;

        // Rent the copy buffer to avoid a large transient allocation.
        byte[] buffer = ArrayPool<byte>.Shared.Rent(bufferSize);
        try
        {
            while (remaining > 0)
            {
                int toCopy = (int)Math.Min(bufferSize, remaining);
                Span<byte> buf = buffer.AsSpan(0, toCopy);
                input.Read(buf, pos);
                output.Write(buf, pos);

                remaining -= toCopy;
                pos += toCopy;

                progress?.ReportAdd(toCopy);
            }
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(buffer);
        }

        progress?.SetTotal(0);
    }

    /// <summary>Writes the entire storage to a file on disk.</summary>
    public static void WriteAllBytes(this IStorage input, string filename, IProgressReport progress = null)
    {
        using (var outFile = new FileStream(filename, FileMode.Create, FileAccess.Write))
        {
            input.CopyToStream(outFile, input.Length, progress);
        }
    }

    /// <summary>Copies <paramref name="length"/> bytes from the start of the storage into a stream.</summary>
    public static void CopyToStream(this IStorage input, Stream output, long length, IProgressReport progress = null)
    {
        const int bufferSize = 0x8000;
        long remaining = length;
        long inOffset = 0;
        var buffer = new byte[bufferSize];
        progress?.SetTotal(length);

        while (remaining > 0)
        {
            int toWrite = (int) Math.Min(buffer.Length, remaining);
            input.Read(buffer, inOffset, toWrite, 0);

            output.Write(buffer, 0, toWrite);
            remaining -= toWrite;
            inOffset += toWrite;
            progress?.ReportAdd(toWrite);
        }
    }

    public static void CopyToStream(this IStorage input, Stream output) => CopyToStream(input, output, input.Length);

    /// <summary>Wraps a stream as a storage; returns null for a null stream. The stream is left open.</summary>
    public static Storage AsStorage(this Stream stream)
    {
        if (stream == null) return null;
        return new StreamStorage(stream, true);
    }

    public static Storage AsStorage(this Stream stream, long start)
    {
        if (stream == null) return null;
        return new StreamStorage(stream, true).Slice(start);
    }

    public static Storage AsStorage(this Stream stream, long start, int length)
    {
        if (stream == null) return null;
        return new StreamStorage(stream, true).Slice(start, length);
    }
}
|
||||
}
|
73
LibHac/IO/StorageStream.cs
Normal file
73
LibHac/IO/StorageStream.cs
Normal file
@ -0,0 +1,73 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// Adapts an <see cref="IStorage"/> to the <see cref="Stream"/> API, tracking
/// a position and delegating reads and writes to the underlying storage.
/// </summary>
public class StorageStream : Stream
{
    private IStorage BaseStorage { get; }
    private bool LeaveOpen { get; }

    public StorageStream(IStorage baseStorage, bool leaveOpen)
    {
        BaseStorage = baseStorage;
        LeaveOpen = leaveOpen;
        Length = baseStorage.Length;
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        // Clamp to the remaining bytes and never pass a negative count
        // downstream (Position past the end would otherwise make it negative).
        int toRead = (int)Math.Max(0, Math.Min(count, Length - Position));
        if (toRead == 0) return 0;

        BaseStorage.Read(buffer, Position, toRead, offset);

        Position += toRead;
        return toRead;
    }

    public override void Write(byte[] buffer, int offset, int count)
    {
        BaseStorage.Write(buffer, Position, count, offset);
        Position += count;
    }

    public override void Flush()
    {
        BaseStorage.Flush();
    }

    public override long Seek(long offset, SeekOrigin origin)
    {
        switch (origin)
        {
            case SeekOrigin.Begin:
                Position = offset;
                break;
            case SeekOrigin.Current:
                Position += offset;
                break;
            case SeekOrigin.End:
                // Per the Stream contract the offset is relative to the end, so
                // a negative offset seeks backward from Length.
                // (Was "Length - offset", which inverted the direction.)
                Position = Length + offset;
                break;
            default:
                throw new ArgumentOutOfRangeException(nameof(origin));
        }

        return Position;
    }

    public override void SetLength(long value)
    {
        throw new NotImplementedException();
    }

    public override bool CanRead => true;
    public override bool CanSeek => true;
    public override bool CanWrite => true;
    public override long Length { get; }
    public override long Position { get; set; }

    protected override void Dispose(bool disposing)
    {
        // Dispose the storage only if we own it.
        if (!LeaveOpen) BaseStorage?.Dispose();
        base.Dispose(disposing);
    }
}
|
||||
}
|
92
LibHac/IO/StreamStorage.cs
Normal file
92
LibHac/IO/StreamStorage.cs
Normal file
@ -0,0 +1,92 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
#if !STREAM_SPAN
|
||||
using System.Buffers;
|
||||
#endif
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// An <see cref="IStorage"/> backed by a <see cref="Stream"/>. All access is
/// serialized through a lock because the underlying stream has a single
/// position shared by every reader and writer.
/// </summary>
public class StreamStorage : Storage
{
    private Stream BaseStream { get; }
    private object Locker { get; } = new object();
    public override long Length { get; }

    public StreamStorage(Stream baseStream, bool leaveOpen)
    {
        BaseStream = baseStream;
        Length = BaseStream.Length;
        // Take ownership of the stream unless asked to leave it open.
        if (!leaveOpen) ToDispose.Add(BaseStream);
    }

    public override void Read(byte[] buffer, long offset, int count, int bufferOffset)
    {
        lock (Locker)
        {
            BaseStream.Position = offset;

            // Stream.Read is allowed to return fewer bytes than requested, so
            // loop until the full count is read or the stream ends. (The
            // previous single call could silently leave stale buffer bytes.)
            int total = 0;
            while (total < count)
            {
                int bytesRead = BaseStream.Read(buffer, bufferOffset + total, count - total);
                if (bytesRead == 0) break;
                total += bytesRead;
            }
        }
    }

    public override void Write(byte[] buffer, long offset, int count, int bufferOffset)
    {
        lock (Locker)
        {
            BaseStream.Position = offset;
            BaseStream.Write(buffer, bufferOffset, count);
        }
    }

    protected override void ReadImpl(Span<byte> destination, long offset)
    {
#if STREAM_SPAN
        lock (Locker)
        {
            if (BaseStream.Position != offset)
            {
                BaseStream.Position = offset;
            }

            // NOTE(review): the span-based Stream.Read may also read partially — confirm.
            BaseStream.Read(destination);
        }
#else
        // No span overloads on Stream here; bounce through a pooled array.
        byte[] buffer = ArrayPool<byte>.Shared.Rent(destination.Length);
        try
        {
            Read(buffer, offset, destination.Length, 0);

            new Span<byte>(buffer, 0, destination.Length).CopyTo(destination);
        }
        finally { ArrayPool<byte>.Shared.Return(buffer); }
#endif
    }

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset)
    {
#if STREAM_SPAN
        lock (Locker)
        {
            BaseStream.Position = offset;
            BaseStream.Write(source);
        }
#else
        byte[] buffer = ArrayPool<byte>.Shared.Rent(source.Length);
        try
        {
            source.CopyTo(buffer);
            Write(buffer, offset, source.Length, 0);
        }
        finally { ArrayPool<byte>.Shared.Return(buffer); }
#endif
    }

    public override void Flush()
    {
        lock (Locker)
        {
            BaseStream.Flush();
        }
    }
}
|
||||
}
|
54
LibHac/IO/SubStorage.cs
Normal file
54
LibHac/IO/SubStorage.cs
Normal file
@ -0,0 +1,54 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.IO
|
||||
{
|
||||
/// <summary>
/// A storage exposing a fixed window [Offset, Offset + Length) of another storage.
/// </summary>
public class SubStorage : Storage
{
    private IStorage BaseStorage { get; }
    private long Offset { get; }
    public override long Length { get; }

    /// <summary>Creates a window over a region of another storage, leaving it open.</summary>
    public SubStorage(IStorage baseStorage, long offset, long length)
    {
        BaseStorage = baseStorage;
        Offset = offset;
        Length = length;
    }

    /// <summary>Creates a window, optionally taking ownership of the base storage.</summary>
    public SubStorage(IStorage baseStorage, long offset, long length, bool leaveOpen)
        : this(baseStorage, offset, length)
    {
        if (!leaveOpen) ToDispose.Add(BaseStorage);
    }

    /// <summary>Creates a window with an explicit access level (e.g. read-only).</summary>
    public SubStorage(IStorage baseStorage, long offset, long length, bool leaveOpen, FileAccess access)
        : this(baseStorage, offset, length, leaveOpen)
    {
        Access = access;
    }

    // Reads and writes are simply shifted by the window's base offset.
    protected override void ReadImpl(Span<byte> destination, long offset) =>
        BaseStorage.Read(destination, Offset + offset);

    protected override void WriteImpl(ReadOnlySpan<byte> source, long offset) =>
        BaseStorage.Write(source, Offset + offset);

    public override void Flush() => BaseStorage.Flush();

    /// <summary>
    /// Slices relative to this window by slicing the base storage directly;
    /// the child keeps this instance alive when <paramref name="leaveOpen"/> is false.
    /// </summary>
    public override Storage Slice(long start, long length, bool leaveOpen)
    {
        Storage sliced = BaseStorage.Slice(Offset + start, length, true);
        if (!leaveOpen) sliced.ToDispose.Add(this);

        return sliced;
    }
}
|
||||
}
|
@ -1,199 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using System.Security.Cryptography;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
/// <summary>
/// A <see cref="SectorStream"/> that verifies each block it reads against a
/// SHA-256 digest held in a separate hash stream, and recomputes those
/// digests on write.
/// </summary>
public class IntegrityVerificationStream : SectorStream
{
    private const int DigestSize = 0x20; // SHA-256 produces 32-byte digests

    // One DigestSize-byte hash per block, indexed by block number.
    private Stream HashStream { get; }
    public IntegrityCheckLevel IntegrityCheckLevel { get; }
    // Per-block validation results, filled in lazily as blocks are read.
    public Validity[] BlockValidities { get; }

    // Save-type streams prepend this salt to the hashed data.
    private byte[] Salt { get; }
    private IntegrityStreamType Type { get; }

    private readonly byte[] _hashBuffer = new byte[DigestSize];
    private readonly SHA256 _hash = SHA256.Create();

    public IntegrityVerificationStream(IntegrityVerificationInfo info, Stream hashStream, IntegrityCheckLevel integrityCheckLevel)
        : base(info.Data, info.BlockSize)
    {
        HashStream = hashStream;
        IntegrityCheckLevel = integrityCheckLevel;
        Salt = info.Salt;
        Type = info.Type;

        BlockValidities = new Validity[SectorCount];
    }

    public override long Seek(long offset, SeekOrigin origin)
    {
        switch (origin)
        {
            case SeekOrigin.Begin:
                Position = offset;
                break;
            case SeekOrigin.Current:
                Position += offset;
                break;
            case SeekOrigin.End:
                // NOTE(review): the standard Stream contract is Length + offset;
                // this inverts the offset's sign — confirm callers expect this.
                Position = Length - offset;
                break;
        }

        return Position;
    }

    public override void SetLength(long value)
    {
        throw new NotImplementedException();
    }

    public override int Read(byte[] buffer, int offset, int count) =>
        Read(buffer, offset, count, IntegrityCheckLevel);

    /// <summary>
    /// Reads the current block and verifies it at the requested check level,
    /// caching the result in <see cref="BlockValidities"/>.
    /// </summary>
    public int Read(byte[] buffer, int offset, int count, IntegrityCheckLevel integrityCheckLevel)
    {
        // Fetch the expected digest for the block about to be read.
        long blockNum = CurrentSector;
        HashStream.Position = blockNum * DigestSize;
        HashStream.Read(_hashBuffer, 0, DigestSize);

        int bytesRead = base.Read(buffer, offset, count);
        int bytesToHash = SectorSize;

        if (bytesRead == 0) return 0;

        // If a hash is zero the data for the entire block is zero
        if (Type == IntegrityStreamType.Save && _hashBuffer.IsEmpty())
        {
            Array.Clear(buffer, offset, SectorSize);
            BlockValidities[blockNum] = Validity.Valid;
            return bytesRead;
        }

        if (bytesRead < SectorSize)
        {
            // Pad out unused portion of block
            Array.Clear(buffer, offset + bytesRead, SectorSize - bytesRead);

            // Partition FS hashes don't pad out an incomplete block
            if (Type == IntegrityStreamType.PartitionFs)
            {
                bytesToHash = bytesRead;
            }
        }

        // A block already known to be bad fails immediately at the strict level.
        if (BlockValidities[blockNum] == Validity.Invalid && integrityCheckLevel == IntegrityCheckLevel.ErrorOnInvalid)
        {
            throw new InvalidDataException("Hash error!");
        }

        if (integrityCheckLevel == IntegrityCheckLevel.None) return bytesRead;

        // Skip re-hashing blocks that were checked on a previous read.
        if (BlockValidities[blockNum] != Validity.Unchecked) return bytesRead;

        byte[] hash = DoHash(buffer, offset, bytesToHash);

        Validity validity = Util.ArraysEqual(_hashBuffer, hash) ? Validity.Valid : Validity.Invalid;
        BlockValidities[blockNum] = validity;

        if (validity == Validity.Invalid && integrityCheckLevel == IntegrityCheckLevel.ErrorOnInvalid)
        {
            throw new InvalidDataException("Hash error!");
        }

        return bytesRead;
    }

    /// <summary>Writes a block and stores its recomputed hash.</summary>
    public override void Write(byte[] buffer, int offset, int count)
    {
        long blockNum = CurrentSector;
        int toWrite = (int)Math.Min(count, Length - Position);
        byte[] hash = DoHash(buffer, offset, toWrite);

        // NOTE(review): IsEmpty() is evaluated over the whole buffer, not just
        // the [offset, offset + count) slice being written — confirm intended.
        if (Type == IntegrityStreamType.Save && buffer.IsEmpty())
        {
            // An all-zero save block is stored with an all-zero hash.
            Array.Clear(hash, 0, DigestSize);
        }

        base.Write(buffer, offset, count);

        HashStream.Position = blockNum * DigestSize;
        HashStream.Write(hash, 0, DigestSize);
    }

    // Computes the block digest: SHA-256 over (salt, for save streams) + data,
    // with the high bit of the final byte forced on for save hashes.
    private byte[] DoHash(byte[] buffer, int offset, int count)
    {
        _hash.Initialize();

        if (Type == IntegrityStreamType.Save)
        {
            _hash.TransformBlock(Salt, 0, Salt.Length, null, 0);
        }

        _hash.TransformBlock(buffer, offset, count, null, 0);
        _hash.TransformFinalBlock(buffer, 0, 0);

        byte[] hash = _hash.Hash;

        if (Type == IntegrityStreamType.Save)
        {
            // This bit is set on all save hashes
            hash[0x1F] |= 0x80;
        }

        return hash;
    }

    public override void Flush()
    {
        HashStream.Flush();
        base.Flush();
    }

    public override bool CanRead => true;
    public override bool CanSeek => true;
    // NOTE(review): reports false even though Write is implemented above — confirm.
    public override bool CanWrite => false;
}
|
||||
|
||||
/// <summary>
/// Information for creating an <see cref="IntegrityVerificationStream"/>
/// </summary>
public class IntegrityVerificationInfo
{
    // Stream containing the data to be verified.
    public Stream Data { get; set; }
    // Size in bytes of each hashed block.
    public int BlockSize { get; set; }
    // Salt prepended to the hashed data for save-type streams.
    public byte[] Salt { get; set; }
    // Filesystem kind; selects salting and padding behavior.
    public IntegrityStreamType Type { get; set; }
}
|
||||
|
||||
/// <summary>
/// The kind of filesystem a hashed stream belongs to; selects the salting
/// and incomplete-block padding behavior in <see cref="IntegrityVerificationStream"/>.
/// </summary>
public enum IntegrityStreamType
{
    Save,
    RomFs,
    PartitionFs
}
|
||||
|
||||
/// <summary>
/// Represents the level of integrity checks to be performed.
/// </summary>
public enum IntegrityCheckLevel
{
    /// <summary>
    /// No integrity checks will be performed.
    /// </summary>
    None,
    /// <summary>
    /// Invalid blocks will be marked as invalid when read, and will not cause an error.
    /// </summary>
    IgnoreOnInvalid,
    /// <summary>
    /// An <see cref="InvalidDataException"/> will be thrown if an integrity check fails.
    /// </summary>
    ErrorOnInvalid
}
|
||||
}
|
@ -4,7 +4,7 @@ using System.IO;
|
||||
using System.Linq;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -40,9 +40,6 @@ namespace LibHac
|
||||
public byte[][][] KeyAreaKeys { get; } = Util.CreateJaggedArray<byte[][][]>(0x20, 3, 0x10);
|
||||
public byte[] SaveMacKey { get; } = new byte[0x10];
|
||||
public byte[][] SdCardKeys { get; } = Util.CreateJaggedArray<byte[][]>(2, 0x20);
|
||||
public byte[] NcaHdrFixedKeyModulus { get; } = new byte[0x100];
|
||||
public byte[] AcidFixedKeyModulus { get; } = new byte[0x100];
|
||||
public byte[] Package2FixedKeyModulus { get; } = new byte[0x100];
|
||||
public byte[] EticketRsaKek { get; } = new byte[0x10];
|
||||
public byte[] RetailSpecificAesKeySource { get; } = new byte[0x10];
|
||||
public byte[] PerConsoleKeySource { get; } = new byte[0x10];
|
||||
@ -57,12 +54,72 @@ namespace LibHac
|
||||
|
||||
public RSAParameters EticketExtKeyRsa { get; set; }
|
||||
|
||||
public byte[] NcaHdrFixedKeyModulus { get; } =
|
||||
{
|
||||
0xBF, 0xBE, 0x40, 0x6C, 0xF4, 0xA7, 0x80, 0xE9, 0xF0, 0x7D, 0x0C, 0x99, 0x61, 0x1D, 0x77, 0x2F,
|
||||
0x96, 0xBC, 0x4B, 0x9E, 0x58, 0x38, 0x1B, 0x03, 0xAB, 0xB1, 0x75, 0x49, 0x9F, 0x2B, 0x4D, 0x58,
|
||||
0x34, 0xB0, 0x05, 0xA3, 0x75, 0x22, 0xBE, 0x1A, 0x3F, 0x03, 0x73, 0xAC, 0x70, 0x68, 0xD1, 0x16,
|
||||
0xB9, 0x04, 0x46, 0x5E, 0xB7, 0x07, 0x91, 0x2F, 0x07, 0x8B, 0x26, 0xDE, 0xF6, 0x00, 0x07, 0xB2,
|
||||
0xB4, 0x51, 0xF8, 0x0D, 0x0A, 0x5E, 0x58, 0xAD, 0xEB, 0xBC, 0x9A, 0xD6, 0x49, 0xB9, 0x64, 0xEF,
|
||||
0xA7, 0x82, 0xB5, 0xCF, 0x6D, 0x70, 0x13, 0xB0, 0x0F, 0x85, 0xF6, 0xA9, 0x08, 0xAA, 0x4D, 0x67,
|
||||
0x66, 0x87, 0xFA, 0x89, 0xFF, 0x75, 0x90, 0x18, 0x1E, 0x6B, 0x3D, 0xE9, 0x8A, 0x68, 0xC9, 0x26,
|
||||
0x04, 0xD9, 0x80, 0xCE, 0x3F, 0x5E, 0x92, 0xCE, 0x01, 0xFF, 0x06, 0x3B, 0xF2, 0xC1, 0xA9, 0x0C,
|
||||
0xCE, 0x02, 0x6F, 0x16, 0xBC, 0x92, 0x42, 0x0A, 0x41, 0x64, 0xCD, 0x52, 0xB6, 0x34, 0x4D, 0xAE,
|
||||
0xC0, 0x2E, 0xDE, 0xA4, 0xDF, 0x27, 0x68, 0x3C, 0xC1, 0xA0, 0x60, 0xAD, 0x43, 0xF3, 0xFC, 0x86,
|
||||
0xC1, 0x3E, 0x6C, 0x46, 0xF7, 0x7C, 0x29, 0x9F, 0xFA, 0xFD, 0xF0, 0xE3, 0xCE, 0x64, 0xE7, 0x35,
|
||||
0xF2, 0xF6, 0x56, 0x56, 0x6F, 0x6D, 0xF1, 0xE2, 0x42, 0xB0, 0x83, 0x40, 0xA5, 0xC3, 0x20, 0x2B,
|
||||
0xCC, 0x9A, 0xAE, 0xCA, 0xED, 0x4D, 0x70, 0x30, 0xA8, 0x70, 0x1C, 0x70, 0xFD, 0x13, 0x63, 0x29,
|
||||
0x02, 0x79, 0xEA, 0xD2, 0xA7, 0xAF, 0x35, 0x28, 0x32, 0x1C, 0x7B, 0xE6, 0x2F, 0x1A, 0xAA, 0x40,
|
||||
0x7E, 0x32, 0x8C, 0x27, 0x42, 0xFE, 0x82, 0x78, 0xEC, 0x0D, 0xEB, 0xE6, 0x83, 0x4B, 0x6D, 0x81,
|
||||
0x04, 0x40, 0x1A, 0x9E, 0x9A, 0x67, 0xF6, 0x72, 0x29, 0xFA, 0x04, 0xF0, 0x9D, 0xE4, 0xF4, 0x03
|
||||
};
|
||||
|
||||
public byte[] AcidFixedKeyModulus { get; } =
|
||||
{
|
||||
0xDD, 0xC8, 0xDD, 0xF2, 0x4E, 0x6D, 0xF0, 0xCA, 0x9E, 0xC7, 0x5D, 0xC7, 0x7B, 0xAD, 0xFE, 0x7D,
|
||||
0x23, 0x89, 0x69, 0xB6, 0xF2, 0x06, 0xA2, 0x02, 0x88, 0xE1, 0x55, 0x91, 0xAB, 0xCB, 0x4D, 0x50,
|
||||
0x2E, 0xFC, 0x9D, 0x94, 0x76, 0xD6, 0x4C, 0xD8, 0xFF, 0x10, 0xFA, 0x5E, 0x93, 0x0A, 0xB4, 0x57,
|
||||
0xAC, 0x51, 0xC7, 0x16, 0x66, 0xF4, 0x1A, 0x54, 0xC2, 0xC5, 0x04, 0x3D, 0x1B, 0xFE, 0x30, 0x20,
|
||||
0x8A, 0xAC, 0x6F, 0x6F, 0xF5, 0xC7, 0xB6, 0x68, 0xB8, 0xC9, 0x40, 0x6B, 0x42, 0xAD, 0x11, 0x21,
|
||||
0xE7, 0x8B, 0xE9, 0x75, 0x01, 0x86, 0xE4, 0x48, 0x9B, 0x0A, 0x0A, 0xF8, 0x7F, 0xE8, 0x87, 0xF2,
|
||||
0x82, 0x01, 0xE6, 0xA3, 0x0F, 0xE4, 0x66, 0xAE, 0x83, 0x3F, 0x4E, 0x9F, 0x5E, 0x01, 0x30, 0xA4,
|
||||
0x00, 0xB9, 0x9A, 0xAE, 0x5F, 0x03, 0xCC, 0x18, 0x60, 0xE5, 0xEF, 0x3B, 0x5E, 0x15, 0x16, 0xFE,
|
||||
0x1C, 0x82, 0x78, 0xB5, 0x2F, 0x47, 0x7C, 0x06, 0x66, 0x88, 0x5D, 0x35, 0xA2, 0x67, 0x20, 0x10,
|
||||
0xE7, 0x6C, 0x43, 0x68, 0xD3, 0xE4, 0x5A, 0x68, 0x2A, 0x5A, 0xE2, 0x6D, 0x73, 0xB0, 0x31, 0x53,
|
||||
0x1C, 0x20, 0x09, 0x44, 0xF5, 0x1A, 0x9D, 0x22, 0xBE, 0x12, 0xA1, 0x77, 0x11, 0xE2, 0xA1, 0xCD,
|
||||
0x40, 0x9A, 0xA2, 0x8B, 0x60, 0x9B, 0xEF, 0xA0, 0xD3, 0x48, 0x63, 0xA2, 0xF8, 0xA3, 0x2C, 0x08,
|
||||
0x56, 0x52, 0x2E, 0x60, 0x19, 0x67, 0x5A, 0xA7, 0x9F, 0xDC, 0x3F, 0x3F, 0x69, 0x2B, 0x31, 0x6A,
|
||||
0xB7, 0x88, 0x4A, 0x14, 0x84, 0x80, 0x33, 0x3C, 0x9D, 0x44, 0xB7, 0x3F, 0x4C, 0xE1, 0x75, 0xEA,
|
||||
0x37, 0xEA, 0xE8, 0x1E, 0x7C, 0x77, 0xB7, 0xC6, 0x1A, 0xA2, 0xF0, 0x9F, 0x10, 0x61, 0xCD, 0x7B,
|
||||
0x5B, 0x32, 0x4C, 0x37, 0xEF, 0xB1, 0x71, 0x68, 0x53, 0x0A, 0xED, 0x51, 0x7D, 0x35, 0x22, 0xFD
|
||||
};
|
||||
|
||||
public byte[] Package2FixedKeyModulus { get; } =
|
||||
{
|
||||
0x8D, 0x13, 0xA7, 0x77, 0x6A, 0xE5, 0xDC, 0xC0, 0x3B, 0x25, 0xD0, 0x58, 0xE4, 0x20, 0x69, 0x59,
|
||||
0x55, 0x4B, 0xAB, 0x70, 0x40, 0x08, 0x28, 0x07, 0xA8, 0xA7, 0xFD, 0x0F, 0x31, 0x2E, 0x11, 0xFE,
|
||||
0x47, 0xA0, 0xF9, 0x9D, 0xDF, 0x80, 0xDB, 0x86, 0x5A, 0x27, 0x89, 0xCD, 0x97, 0x6C, 0x85, 0xC5,
|
||||
0x6C, 0x39, 0x7F, 0x41, 0xF2, 0xFF, 0x24, 0x20, 0xC3, 0x95, 0xA6, 0xF7, 0x9D, 0x4A, 0x45, 0x74,
|
||||
0x8B, 0x5D, 0x28, 0x8A, 0xC6, 0x99, 0x35, 0x68, 0x85, 0xA5, 0x64, 0x32, 0x80, 0x9F, 0xD3, 0x48,
|
||||
0x39, 0xA2, 0x1D, 0x24, 0x67, 0x69, 0xDF, 0x75, 0xAC, 0x12, 0xB5, 0xBD, 0xC3, 0x29, 0x90, 0xBE,
|
||||
0x37, 0xE4, 0xA0, 0x80, 0x9A, 0xBE, 0x36, 0xBF, 0x1F, 0x2C, 0xAB, 0x2B, 0xAD, 0xF5, 0x97, 0x32,
|
||||
0x9A, 0x42, 0x9D, 0x09, 0x8B, 0x08, 0xF0, 0x63, 0x47, 0xA3, 0xE9, 0x1B, 0x36, 0xD8, 0x2D, 0x8A,
|
||||
0xD7, 0xE1, 0x54, 0x11, 0x95, 0xE4, 0x45, 0x88, 0x69, 0x8A, 0x2B, 0x35, 0xCE, 0xD0, 0xA5, 0x0B,
|
||||
0xD5, 0x5D, 0xAC, 0xDB, 0xAF, 0x11, 0x4D, 0xCA, 0xB8, 0x1E, 0xE7, 0x01, 0x9E, 0xF4, 0x46, 0xA3,
|
||||
0x8A, 0x94, 0x6D, 0x76, 0xBD, 0x8A, 0xC8, 0x3B, 0xD2, 0x31, 0x58, 0x0C, 0x79, 0xA8, 0x26, 0xE9,
|
||||
0xD1, 0x79, 0x9C, 0xCB, 0xD4, 0x2B, 0x6A, 0x4F, 0xC6, 0xCC, 0xCF, 0x90, 0xA7, 0xB9, 0x98, 0x47,
|
||||
0xFD, 0xFA, 0x4C, 0x6C, 0x6F, 0x81, 0x87, 0x3B, 0xCA, 0xB8, 0x50, 0xF6, 0x3E, 0x39, 0x5D, 0x4D,
|
||||
0x97, 0x3F, 0x0F, 0x35, 0x39, 0x53, 0xFB, 0xFA, 0xCD, 0xAB, 0xA8, 0x7A, 0x62, 0x9A, 0x3F, 0xF2,
|
||||
0x09, 0x27, 0x96, 0x3F, 0x07, 0x9A, 0x91, 0xF7, 0x16, 0xBF, 0xC6, 0x3A, 0x82, 0x5A, 0x4B, 0xCF,
|
||||
0x49, 0x50, 0x95, 0x8C, 0x55, 0x80, 0x7E, 0x39, 0xB1, 0x48, 0x05, 0x1E, 0x21, 0xC7, 0x24, 0x4F
|
||||
};
|
||||
|
||||
public Dictionary<byte[], byte[]> TitleKeys { get; } = new Dictionary<byte[], byte[]>(new ByteArray128BitComparer());
|
||||
|
||||
public void SetSdSeed(byte[] sdseed)
|
||||
{
|
||||
Array.Copy(sdseed, SdSeed, SdSeed.Length);
|
||||
DeriveKeys();
|
||||
DeriveSdCardKeys();
|
||||
}
|
||||
|
||||
public void DeriveKeys(IProgressReport logger = null)
|
||||
@ -120,10 +177,10 @@ namespace LibHac
|
||||
|
||||
Array.Copy(EncryptedKeyblobs[i], 0x10, counter, 0, 0x10);
|
||||
|
||||
using (var keyblobDec = new RandomAccessSectorStream(new Aes128CtrStream(
|
||||
new MemoryStream(EncryptedKeyblobs[i], 0x20, Keyblobs[i].Length), KeyblobKeys[i], counter)))
|
||||
using (var keyblobDec = new Aes128CtrStorage(
|
||||
new MemoryStorage(EncryptedKeyblobs[i], 0x20, Keyblobs[i].Length), KeyblobKeys[i], counter, false))
|
||||
{
|
||||
keyblobDec.Read(Keyblobs[i], 0, Keyblobs[i].Length);
|
||||
keyblobDec.Read(Keyblobs[i], 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -13,12 +13,12 @@ namespace LibHac
|
||||
public int[] SectionOffsets { get; } = new int[6];
|
||||
public int Size { get; }
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Kip(Stream stream)
|
||||
public Kip(IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
Header = new KipHeader(StreamSource.CreateStream());
|
||||
Storage = storage;
|
||||
Header = new KipHeader(Storage);
|
||||
|
||||
Size = HeaderSize;
|
||||
|
||||
@ -30,26 +30,26 @@ namespace LibHac
|
||||
}
|
||||
}
|
||||
|
||||
public Stream OpenSection(int index)
|
||||
public IStorage OpenSection(int index)
|
||||
{
|
||||
if (index < 0 || index > 5)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(index), "Section index must be between 0-5");
|
||||
}
|
||||
|
||||
return StreamSource.CreateStream(SectionOffsets[index], Header.Sections[index].CompressedSize);
|
||||
return Storage.Slice(SectionOffsets[index], Header.Sections[index].CompressedSize);
|
||||
}
|
||||
|
||||
public byte[] DecompressSection(int index)
|
||||
{
|
||||
Stream compStream = OpenSection(index);
|
||||
IStorage compStream = OpenSection(index);
|
||||
var compressed = new byte[compStream.Length];
|
||||
compStream.Read(compressed, 0, compressed.Length);
|
||||
compStream.Read(compressed, 0);
|
||||
|
||||
return DecompressBlz(compressed);
|
||||
}
|
||||
|
||||
public Stream OpenRawFile() => StreamSource.CreateStream();
|
||||
public IStorage OpenRawFile() => Storage;
|
||||
|
||||
private static byte[] DecompressBlz(byte[] compressed)
|
||||
{
|
||||
@ -119,9 +119,9 @@ namespace LibHac
|
||||
public KipSectionHeader[] Sections { get; } = new KipSectionHeader[6];
|
||||
public byte[] Capabilities { get; }
|
||||
|
||||
public KipHeader(Stream stream)
|
||||
public KipHeader(IStorage storage)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
if (Magic != "KIP1")
|
||||
@ -172,14 +172,13 @@ namespace LibHac
|
||||
public int Size { get; }
|
||||
public int KipCount { get; }
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Ini1(Stream stream)
|
||||
public Ini1(IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
Stream initStream = StreamSource.CreateStream();
|
||||
Storage = storage;
|
||||
|
||||
var reader = new BinaryReader(initStream);
|
||||
var reader = new BinaryReader(Storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
if (Magic != "INI1")
|
||||
@ -196,9 +195,9 @@ namespace LibHac
|
||||
for (int i = 0; i < KipCount; i++)
|
||||
{
|
||||
// How to get the KIP's size the lazy way
|
||||
var kip = new Kip(StreamSource.CreateStream(offset));
|
||||
var kip = new Kip(Storage.Slice(offset));
|
||||
|
||||
Kips[i] = new Kip(StreamSource.CreateStream(offset, kip.Size));
|
||||
Kips[i] = new Kip(Storage.Slice(offset, kip.Size));
|
||||
|
||||
offset += kip.Size;
|
||||
}
|
||||
|
@ -26,8 +26,14 @@
|
||||
<DefineConstants>$(DefineConstants);USE_RSA_CNG</DefineConstants>
|
||||
</PropertyGroup>
|
||||
|
||||
<PropertyGroup Condition=" '$(TargetFramework)' == 'netcoreapp2.1' ">
|
||||
<DefineConstants>$(DefineConstants);STREAM_SPAN</DefineConstants>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="System.Numerics.Vectors" Version="4.5.0" Condition=" '$(TargetFramework)' == 'net46' " />
|
||||
<PackageReference Include="System.Memory" Version="4.5.1" Condition=" '$(TargetFramework)' == 'net46' " />
|
||||
<PackageReference Include="System.Buffers" Version="4.5.0" Condition=" '$(TargetFramework)' == 'net46' " />
|
||||
</ItemGroup>
|
||||
|
||||
</Project>
|
||||
|
@ -2,37 +2,34 @@
|
||||
using System.IO;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using LibHac.Streams;
|
||||
using LibHac.XTSSharp;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class Nax0 : IDisposable
|
||||
{
|
||||
private const int SectorSize = 0x4000;
|
||||
|
||||
public byte[] Hmac { get; private set; }
|
||||
public byte[][] EncKeys { get; } = Util.CreateJaggedArray<byte[][]>(2, 0x10);
|
||||
public byte[][] Keys { get; } = Util.CreateJaggedArray<byte[][]>(2, 0x10);
|
||||
public byte[] Key { get; } = new byte[0x20];
|
||||
public long Length { get; private set; }
|
||||
public Stream Stream { get; }
|
||||
private bool KeepOpen { get; }
|
||||
public IStorage BaseStorage { get; }
|
||||
private bool LeaveOpen { get; }
|
||||
|
||||
public Nax0(Keyset keyset, Stream stream, string sdPath, bool keepOpen)
|
||||
public Nax0(Keyset keyset, IStorage storage, string sdPath, bool leaveOpen)
|
||||
{
|
||||
stream.Position = 0;
|
||||
KeepOpen = keepOpen;
|
||||
ReadHeader(stream);
|
||||
DeriveKeys(keyset, sdPath, stream);
|
||||
LeaveOpen = leaveOpen;
|
||||
ReadHeader(storage.AsStream());
|
||||
DeriveKeys(keyset, sdPath, storage);
|
||||
|
||||
stream.Position = 0x4000;
|
||||
Xts xts = XtsAes128.Create(Keys[0], Keys[1]);
|
||||
Stream = new RandomAccessSectorStream(new XtsSectorStream(stream, xts, 0x4000, 0x4000), keepOpen);
|
||||
BaseStorage = new CachedStorage(new Aes128XtsStorage(storage.Slice(SectorSize), Key, SectorSize, leaveOpen), 4, leaveOpen);
|
||||
}
|
||||
|
||||
private void ReadHeader(Stream stream)
|
||||
{
|
||||
var header = new byte[0x60];
|
||||
stream.Read(header, 0, 0x60);
|
||||
var reader = new BinaryReader(new MemoryStream(header));
|
||||
var reader = new BinaryReader(stream);
|
||||
|
||||
Hmac = reader.ReadBytes(0x20);
|
||||
string magic = reader.ReadAscii(4);
|
||||
@ -43,11 +40,10 @@ namespace LibHac
|
||||
Length = reader.ReadInt64();
|
||||
}
|
||||
|
||||
private void DeriveKeys(Keyset keyset, string sdPath, Stream stream)
|
||||
private void DeriveKeys(Keyset keyset, string sdPath, IStorage storage)
|
||||
{
|
||||
stream.Position = 0x20;
|
||||
var validationHashKey = new byte[0x60];
|
||||
stream.Read(validationHashKey, 0, 0x60);
|
||||
storage.Read(validationHashKey, 0x20);
|
||||
|
||||
// Try both the NCA and save key sources and pick the one that works
|
||||
for (int k = 0; k < 2; k++)
|
||||
@ -66,6 +62,8 @@ namespace LibHac
|
||||
// Decrypt this NAX0's keys
|
||||
Crypto.DecryptEcb(naxSpecificKeys[0], EncKeys[0], Keys[0], 0x10);
|
||||
Crypto.DecryptEcb(naxSpecificKeys[1], EncKeys[1], Keys[1], 0x10);
|
||||
Array.Copy(Keys[0], 0, Key, 0, 0x10);
|
||||
Array.Copy(Keys[1], 0, Key, 0x10, 0x10);
|
||||
|
||||
// Copy the decrypted keys into the NAX0 header and use that for the HMAC key
|
||||
// for validating that the keys are correct
|
||||
@ -86,9 +84,9 @@ namespace LibHac
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (!KeepOpen)
|
||||
if (!LeaveOpen)
|
||||
{
|
||||
Stream?.Dispose();
|
||||
BaseStorage?.Dispose();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
262
LibHac/Nca.cs
262
LibHac/Nca.cs
@ -1,35 +1,40 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
using LibHac.XTSSharp;
|
||||
using System.Linq;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
public class Nca : IDisposable
|
||||
{
|
||||
public NcaHeader Header { get; private set; }
|
||||
public NcaHeader Header { get; }
|
||||
public string NcaId { get; set; }
|
||||
public string Filename { get; set; }
|
||||
public bool HasRightsId { get; private set; }
|
||||
public int CryptoType { get; private set; }
|
||||
public bool HasRightsId { get; }
|
||||
public int CryptoType { get; }
|
||||
public byte[][] DecryptedKeys { get; } = Util.CreateJaggedArray<byte[][]>(4, 0x10);
|
||||
public byte[] TitleKey { get; }
|
||||
public byte[] TitleKeyDec { get; } = new byte[0x10];
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private bool KeepOpen { get; }
|
||||
private bool LeaveOpen { get; }
|
||||
private Nca BaseNca { get; set; }
|
||||
private IStorage BaseStorage { get; }
|
||||
private Keyset Keyset { get; }
|
||||
|
||||
public Npdm.NpdmBinary Npdm { get; private set; }
|
||||
|
||||
private bool IsMissingTitleKey { get; set; }
|
||||
private string MissingKeyName { get; set; }
|
||||
|
||||
public NcaSection[] Sections { get; } = new NcaSection[4];
|
||||
|
||||
public Nca(Keyset keyset, Stream stream, bool keepOpen)
|
||||
public Nca(Keyset keyset, IStorage storage, bool leaveOpen)
|
||||
{
|
||||
stream.Position = 0;
|
||||
KeepOpen = keepOpen;
|
||||
StreamSource = new SharedStreamSource(stream, keepOpen);
|
||||
DecryptHeader(keyset, stream);
|
||||
LeaveOpen = leaveOpen;
|
||||
BaseStorage = storage;
|
||||
Keyset = keyset;
|
||||
|
||||
Header = DecryptHeader();
|
||||
|
||||
CryptoType = Math.Max(Header.CryptoType, Header.CryptoType2);
|
||||
if (CryptoType > 0) CryptoType--;
|
||||
@ -65,12 +70,12 @@ namespace LibHac
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens a <see cref="Stream"/> of the underlying NCA file.
|
||||
/// Opens the <see cref="IStorage"/> of the underlying NCA file.
|
||||
/// </summary>
|
||||
/// <returns>A <see cref="Stream"/> that provides access to the entire raw NCA file.</returns>
|
||||
public Stream GetStream()
|
||||
/// <returns>The <see cref="IStorage"/> that provides access to the entire raw NCA file.</returns>
|
||||
public IStorage GetStorage()
|
||||
{
|
||||
return StreamSource.CreateStream();
|
||||
return BaseStorage.WithAccess(FileAccess.Read);
|
||||
}
|
||||
|
||||
public bool CanOpenSection(int index)
|
||||
@ -83,7 +88,7 @@ namespace LibHac
|
||||
return sect.Header.EncryptionType == NcaEncryptionType.None || !IsMissingTitleKey && string.IsNullOrWhiteSpace(MissingKeyName);
|
||||
}
|
||||
|
||||
private Stream OpenRawSection(int index)
|
||||
private IStorage OpenRawSection(int index, bool leaveOpen)
|
||||
{
|
||||
if (index < 0 || index > 3) throw new ArgumentOutOfRangeException(nameof(index));
|
||||
|
||||
@ -106,32 +111,38 @@ namespace LibHac
|
||||
long offset = sect.Offset;
|
||||
long size = sect.Size;
|
||||
|
||||
if (!Util.IsSubRange(offset, size, StreamSource.Length))
|
||||
{
|
||||
throw new InvalidDataException(
|
||||
$"Section offset (0x{offset:x}) and length (0x{size:x}) fall outside the total NCA length (0x{StreamSource.Length:x}).");
|
||||
}
|
||||
// todo
|
||||
//if (!Util.IsSubRange(offset, size, StreamSource.Length))
|
||||
//{
|
||||
// throw new InvalidDataException(
|
||||
// $"Section offset (0x{offset:x}) and length (0x{size:x}) fall outside the total NCA length (0x{StreamSource.Length:x}).");
|
||||
//}
|
||||
|
||||
Stream rawStream = StreamSource.CreateStream(offset, size);
|
||||
IStorage rawStorage = BaseStorage.Slice(offset, size, leaveOpen);
|
||||
|
||||
switch (sect.Header.EncryptionType)
|
||||
{
|
||||
case NcaEncryptionType.None:
|
||||
return rawStream;
|
||||
return rawStorage;
|
||||
case NcaEncryptionType.XTS:
|
||||
throw new NotImplementedException("NCA sections using XTS are not supported");
|
||||
case NcaEncryptionType.AesCtr:
|
||||
return new RandomAccessSectorStream(new Aes128CtrStream(rawStream, DecryptedKeys[2], offset, sect.Header.Ctr), false);
|
||||
return new CachedStorage(new Aes128CtrStorage(rawStorage, DecryptedKeys[2], offset, sect.Header.Ctr, leaveOpen), 0x4000, 4, leaveOpen);
|
||||
case NcaEncryptionType.AesCtrEx:
|
||||
rawStream = new RandomAccessSectorStream(
|
||||
new BktrCryptoStream(rawStream, DecryptedKeys[2], 0, size, offset, sect.Header.Ctr, sect.Header.BktrInfo),
|
||||
false);
|
||||
if (BaseNca == null) return rawStream;
|
||||
BktrPatchInfo info = sect.Header.BktrInfo;
|
||||
|
||||
Stream baseStream = BaseNca.OpenSection(ProgramPartitionType.Data, true, IntegrityCheckLevel.None) ?? Stream.Null;
|
||||
long bktrOffset = info.RelocationHeader.Offset;
|
||||
long bktrSize = size - bktrOffset;
|
||||
long dataSize = info.RelocationHeader.Offset;
|
||||
|
||||
return new Bktr(rawStream, baseStream, sect);
|
||||
IStorage bucketTreeHeader = new MemoryStorage(sect.Header.BktrInfo.EncryptionHeader.Header);
|
||||
IStorage bucketTreeData = new CachedStorage(new Aes128CtrStorage(rawStorage.Slice(bktrOffset, bktrSize, leaveOpen), DecryptedKeys[2], bktrOffset + offset, sect.Header.Ctr, leaveOpen), 4, leaveOpen);
|
||||
|
||||
IStorage encryptionBucketTreeData = bucketTreeData.Slice(info.EncryptionHeader.Offset - bktrOffset);
|
||||
IStorage decStorage = new Aes128CtrExStorage(rawStorage.Slice(0, dataSize, leaveOpen), bucketTreeHeader, encryptionBucketTreeData, DecryptedKeys[2], offset, sect.Header.Ctr, leaveOpen);
|
||||
decStorage = new CachedStorage(decStorage, 0x4000, 4, leaveOpen);
|
||||
|
||||
return new ConcatenationStorage(new[] { decStorage, bucketTreeData }, leaveOpen);
|
||||
default:
|
||||
throw new ArgumentOutOfRangeException();
|
||||
}
|
||||
@ -144,43 +155,58 @@ namespace LibHac
|
||||
/// <param name="raw"><see langword="true"/> to open the raw section with hash metadata.</param>
|
||||
/// <param name="integrityCheckLevel">The level of integrity checks to be performed when reading the section.
|
||||
/// Always <see cref="IntegrityCheckLevel.None"/> if <paramref name="raw"/> is <see langword="false"/>.</param>
|
||||
/// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="Nca"/> object is disposed; otherwise, <see langword="false"/>.</param>
|
||||
/// <returns>A <see cref="Stream"/> that provides access to the specified section. <see langword="null"/> if the section does not exist.</returns>
|
||||
/// <exception cref="ArgumentOutOfRangeException">The specified <paramref name="index"/> is outside the valid range.</exception>
|
||||
public Stream OpenSection(int index, bool raw, IntegrityCheckLevel integrityCheckLevel)
|
||||
public IStorage OpenSection(int index, bool raw, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
|
||||
{
|
||||
Stream rawStream = OpenRawSection(index);
|
||||
|
||||
if (raw || rawStream == null) return rawStream;
|
||||
IStorage rawStorage = OpenRawSection(index, leaveOpen);
|
||||
|
||||
NcaSection sect = Sections[index];
|
||||
NcaFsHeader header = sect.Header;
|
||||
|
||||
// If it's a patch section without a base, return the raw section because it has no hash data
|
||||
if (header.EncryptionType == NcaEncryptionType.AesCtrEx && BaseNca == null) return rawStream;
|
||||
if (header.EncryptionType == NcaEncryptionType.AesCtrEx)
|
||||
{
|
||||
if (raw && BaseNca == null) return rawStorage;
|
||||
|
||||
BktrHeader bktrInfo = header.BktrInfo.RelocationHeader;
|
||||
IStorage patchStorage = rawStorage.Slice(0, bktrInfo.Offset, leaveOpen);
|
||||
|
||||
if (BaseNca == null) return patchStorage;
|
||||
|
||||
IStorage baseStorage = BaseNca.OpenSection(ProgramPartitionType.Data, true, IntegrityCheckLevel.None, leaveOpen);
|
||||
IStorage bktrHeader = new MemoryStorage(bktrInfo.Header);
|
||||
IStorage bktrData = rawStorage.Slice(bktrInfo.Offset, bktrInfo.Size, leaveOpen);
|
||||
|
||||
rawStorage = new IndirectStorage(bktrHeader, bktrData, leaveOpen, baseStorage, patchStorage);
|
||||
}
|
||||
|
||||
if (raw || rawStorage == null) return rawStorage;
|
||||
|
||||
switch (header.HashType)
|
||||
{
|
||||
case NcaHashType.Sha256:
|
||||
return InitIvfcForPartitionfs(header.Sha256Info, new SharedStreamSource(rawStream), integrityCheckLevel);
|
||||
return InitIvfcForPartitionfs(header.Sha256Info, rawStorage, integrityCheckLevel, leaveOpen);
|
||||
case NcaHashType.Ivfc:
|
||||
return InitIvfcForRomfs(header.IvfcInfo, new SharedStreamSource(rawStream), integrityCheckLevel);
|
||||
|
||||
return new HierarchicalIntegrityVerificationStorage(header.IvfcInfo, new MemoryStorage(header.IvfcInfo.MasterHash), rawStorage,
|
||||
IntegrityStorageType.RomFs, integrityCheckLevel, leaveOpen);
|
||||
default:
|
||||
throw new ArgumentOutOfRangeException();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Opens one of the sections in the current <see cref="Nca"/> as a <see cref="HierarchicalIntegrityVerificationStream"/>
|
||||
/// Opens one of the sections in the current <see cref="Nca"/> as a <see cref="HierarchicalIntegrityVerificationStorage"/>
|
||||
/// Only works with sections that have a <see cref="NcaFsHeader.HashType"/> of <see cref="NcaHashType.Ivfc"/> or <see cref="NcaHashType.Sha256"/>.
|
||||
/// </summary>
|
||||
/// <param name="index">The index of the NCA section to open. Valid indexes are 0-3.</param>
|
||||
/// <param name="integrityCheckLevel">The level of integrity checks to be performed when reading the section.</param>
|
||||
/// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="Nca"/> object is disposed; otherwise, <see langword="false"/>.</param>
|
||||
/// <returns>A <see cref="Stream"/> that provides access to the specified section. <see langword="null"/> if the section does not exist,
|
||||
/// or is has no hash metadata.</returns>
|
||||
/// <exception cref="ArgumentOutOfRangeException">The specified <paramref name="index"/> is outside the valid range.</exception>
|
||||
public HierarchicalIntegrityVerificationStream OpenHashedSection(int index, IntegrityCheckLevel integrityCheckLevel) =>
|
||||
OpenSection(index, false, integrityCheckLevel) as HierarchicalIntegrityVerificationStream;
|
||||
public HierarchicalIntegrityVerificationStorage OpenHashedSection(int index, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen) =>
|
||||
OpenSection(index, false, integrityCheckLevel, leaveOpen) as HierarchicalIntegrityVerificationStorage;
|
||||
|
||||
/// <summary>
|
||||
/// Opens one of the sections in the current <see cref="Nca"/>. For use with <see cref="ContentType.Program"/> type NCAs.
|
||||
@ -189,70 +215,44 @@ namespace LibHac
|
||||
/// <param name="raw"><see langword="true"/> to open the raw section with hash metadata.</param>
|
||||
/// <param name="integrityCheckLevel">The level of integrity checks to be performed when reading the section.
|
||||
/// Always <see cref="IntegrityCheckLevel.None"/> if <paramref name="raw"/> is <see langword="false"/>.</param>
|
||||
/// <param name="leaveOpen"><see langword="true"/> to leave the storage open after the <see cref="Nca"/> object is disposed; otherwise, <see langword="false"/>.</param>
|
||||
/// <returns>A <see cref="Stream"/> that provides access to the specified section. <see langword="null"/> if the section does not exist.</returns>
|
||||
/// <exception cref="ArgumentOutOfRangeException">The specified <paramref name="type"/> is outside the valid range.</exception>
|
||||
public Stream OpenSection(ProgramPartitionType type, bool raw, IntegrityCheckLevel integrityCheckLevel) =>
|
||||
OpenSection((int)type, raw, integrityCheckLevel);
|
||||
public IStorage OpenSection(ProgramPartitionType type, bool raw, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen) =>
|
||||
OpenSection((int)type, raw, integrityCheckLevel, leaveOpen);
|
||||
|
||||
private static HierarchicalIntegrityVerificationStream InitIvfcForRomfs(IvfcHeader ivfc,
|
||||
SharedStreamSource romfsStreamSource, IntegrityCheckLevel integrityCheckLevel)
|
||||
private static HierarchicalIntegrityVerificationStorage InitIvfcForPartitionfs(Sha256Info sb,
|
||||
IStorage pfsStorage, IntegrityCheckLevel integrityCheckLevel, bool leaveOpen)
|
||||
{
|
||||
var initInfo = new IntegrityVerificationInfo[ivfc.NumLevels];
|
||||
|
||||
// Set the master hash
|
||||
initInfo[0] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = new MemoryStream(ivfc.MasterHash),
|
||||
BlockSize = 0
|
||||
};
|
||||
|
||||
for (int i = 1; i < ivfc.NumLevels; i++)
|
||||
{
|
||||
IvfcLevelHeader level = ivfc.LevelHeaders[i - 1];
|
||||
Stream data = romfsStreamSource.CreateStream(level.LogicalOffset, level.HashDataSize);
|
||||
|
||||
initInfo[i] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = data,
|
||||
BlockSize = 1 << level.BlockSizePower,
|
||||
Type = IntegrityStreamType.RomFs
|
||||
};
|
||||
}
|
||||
|
||||
return new HierarchicalIntegrityVerificationStream(initInfo, integrityCheckLevel);
|
||||
}
|
||||
|
||||
private static Stream InitIvfcForPartitionfs(Sha256Info sb,
|
||||
SharedStreamSource pfsStreamSource, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
SharedStream hashStream = pfsStreamSource.CreateStream(sb.HashTableOffset, sb.HashTableSize);
|
||||
SharedStream dataStream = pfsStreamSource.CreateStream(sb.DataOffset, sb.DataSize);
|
||||
IStorage hashStorage = pfsStorage.Slice(sb.HashTableOffset, sb.HashTableSize, leaveOpen);
|
||||
IStorage dataStorage = pfsStorage.Slice(sb.DataOffset, sb.DataSize, leaveOpen);
|
||||
|
||||
var initInfo = new IntegrityVerificationInfo[3];
|
||||
|
||||
// Set the master hash
|
||||
initInfo[0] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = new MemoryStream(sb.MasterHash),
|
||||
Data = new StreamStorage(new MemoryStream(sb.MasterHash), true),
|
||||
|
||||
BlockSize = 0,
|
||||
Type = IntegrityStreamType.PartitionFs
|
||||
Type = IntegrityStorageType.PartitionFs
|
||||
};
|
||||
|
||||
initInfo[1] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = hashStream,
|
||||
Data = hashStorage,
|
||||
BlockSize = (int)sb.HashTableSize,
|
||||
Type = IntegrityStreamType.PartitionFs
|
||||
Type = IntegrityStorageType.PartitionFs
|
||||
};
|
||||
|
||||
initInfo[2] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = dataStream,
|
||||
Data = dataStorage,
|
||||
BlockSize = sb.BlockSize,
|
||||
Type = IntegrityStreamType.PartitionFs
|
||||
Type = IntegrityStorageType.PartitionFs
|
||||
};
|
||||
|
||||
return new HierarchicalIntegrityVerificationStream(initInfo, integrityCheckLevel);
|
||||
return new HierarchicalIntegrityVerificationStorage(initInfo, integrityCheckLevel, leaveOpen);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
@ -273,23 +273,50 @@ namespace LibHac
|
||||
}
|
||||
}
|
||||
|
||||
private void DecryptHeader(Keyset keyset, Stream stream)
|
||||
public void ParseNpdm()
|
||||
{
|
||||
if (keyset.HeaderKey.IsEmpty())
|
||||
if (Header.ContentType != ContentType.Program) return;
|
||||
|
||||
var pfs = new Pfs(OpenSection(ProgramPartitionType.Code, false, IntegrityCheckLevel.ErrorOnInvalid, true));
|
||||
|
||||
if (!pfs.TryOpenFile("main.npdm", out IStorage npdmStorage)) return;
|
||||
|
||||
Npdm = new Npdm.NpdmBinary(npdmStorage.AsStream(), Keyset);
|
||||
|
||||
Header.ValidateNpdmSignature(Npdm.AciD.Rsa2048Modulus);
|
||||
}
|
||||
|
||||
public IStorage OpenDecryptedNca()
|
||||
{
|
||||
var list = new List<IStorage> { OpenHeaderStorage() };
|
||||
|
||||
foreach (NcaSection section in Sections.Where(x => x != null).OrderBy(x => x.Offset))
|
||||
{
|
||||
list.Add(OpenRawSection(section.SectionNum, true));
|
||||
}
|
||||
|
||||
return new ConcatenationStorage(list, true);
|
||||
}
|
||||
|
||||
private NcaHeader DecryptHeader()
|
||||
{
|
||||
if (Keyset.HeaderKey.IsEmpty())
|
||||
{
|
||||
throw new MissingKeyException("Unable to decrypt NCA header.", "header_key", KeyType.Common);
|
||||
}
|
||||
|
||||
var headerBytes = new byte[0xC00];
|
||||
Xts xts = XtsAes128.Create(keyset.HeaderKey);
|
||||
using (var headerDec = new RandomAccessSectorStream(new XtsSectorStream(stream, xts, 0x200)))
|
||||
{
|
||||
headerDec.Read(headerBytes, 0, headerBytes.Length);
|
||||
}
|
||||
return new NcaHeader(new BinaryReader(OpenHeaderStorage().AsStream()), Keyset);
|
||||
}
|
||||
|
||||
var reader = new BinaryReader(new MemoryStream(headerBytes));
|
||||
private CachedStorage OpenHeaderStorage()
|
||||
{
|
||||
int size = 0x4000;
|
||||
|
||||
Header = new NcaHeader(reader);
|
||||
// Support reading headers that are only 0xC00 bytes long, but still return
|
||||
// the entire header if available.
|
||||
if (BaseStorage.Length >= 0xC00 && BaseStorage.Length < 0x4000) size = 0xC00;
|
||||
|
||||
return new CachedStorage(new Aes128XtsStorage(BaseStorage.Slice(0, size), Keyset.HeaderKey, 0x200, true), 1, true);
|
||||
}
|
||||
|
||||
private void DecryptKeyArea(Keyset keyset)
|
||||
@ -331,10 +358,10 @@ namespace LibHac
|
||||
// Decrypt this value and compare it to the encryption table offset found in the NCA header
|
||||
|
||||
long offset = sect.Header.BktrInfo.EncryptionHeader.Offset;
|
||||
using (var streamDec = new RandomAccessSectorStream(new Aes128CtrStream(GetStream(), DecryptedKeys[2], sect.Offset, sect.Size, sect.Offset, sect.Header.Ctr)))
|
||||
using (var streamDec = new CachedStorage(new Aes128CtrStorage(GetStorage().Slice(sect.Offset, sect.Size), DecryptedKeys[2], sect.Offset, sect.Header.Ctr, true), 0x4000, 4, false))
|
||||
{
|
||||
var reader = new BinaryReader(streamDec);
|
||||
streamDec.Position = offset + 8;
|
||||
var reader = new BinaryReader(streamDec.AsStream());
|
||||
reader.BaseStream.Position = offset + 8;
|
||||
long size = reader.ReadInt64();
|
||||
|
||||
if (size != offset)
|
||||
@ -369,26 +396,39 @@ namespace LibHac
|
||||
CheckBktrKey(sect);
|
||||
return;
|
||||
case NcaHashType.Ivfc:
|
||||
offset = sect.Header.IvfcInfo.LevelHeaders[0].LogicalOffset;
|
||||
offset = sect.Header.IvfcInfo.LevelHeaders[0].Offset;
|
||||
size = 1 << sect.Header.IvfcInfo.LevelHeaders[0].BlockSizePower;
|
||||
break;
|
||||
}
|
||||
|
||||
Stream stream = OpenSection(index, true, IntegrityCheckLevel.None);
|
||||
IStorage storage = OpenSection(index, true, IntegrityCheckLevel.None, true);
|
||||
|
||||
var hashTable = new byte[size];
|
||||
stream.Position = offset;
|
||||
stream.Read(hashTable, 0, hashTable.Length);
|
||||
storage.Read(hashTable, offset);
|
||||
|
||||
sect.MasterHashValidity = Crypto.CheckMemoryHashTable(hashTable, expected, 0, hashTable.Length);
|
||||
}
|
||||
|
||||
protected virtual void Dispose(bool disposing)
|
||||
{
|
||||
if (disposing)
|
||||
{
|
||||
BaseStorage?.Flush();
|
||||
BaseNca?.BaseStorage?.Flush();
|
||||
|
||||
if (!LeaveOpen)
|
||||
{
|
||||
BaseStorage?.Dispose();
|
||||
BaseNca?.Dispose();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (!KeepOpen)
|
||||
{
|
||||
StreamSource?.Dispose();
|
||||
}
|
||||
Dispose(true);
|
||||
GC.SuppressFinalize(this);
|
||||
}
|
||||
}
|
||||
|
||||
@ -440,13 +480,13 @@ namespace LibHac
|
||||
if (index < 0 || index > 3) throw new IndexOutOfRangeException();
|
||||
if (nca.Sections[index] == null) return;
|
||||
|
||||
Stream section = nca.OpenSection(index, raw, integrityCheckLevel);
|
||||
IStorage storage = nca.OpenSection(index, raw, integrityCheckLevel, true);
|
||||
string dir = Path.GetDirectoryName(filename);
|
||||
if (!string.IsNullOrWhiteSpace(dir)) Directory.CreateDirectory(dir);
|
||||
|
||||
using (var outFile = new FileStream(filename, FileMode.Create, FileAccess.ReadWrite))
|
||||
{
|
||||
section.CopyStream(outFile, section.Length, logger);
|
||||
storage.CopyToStream(outFile, storage.Length, logger);
|
||||
}
|
||||
}
|
||||
|
||||
@ -456,18 +496,18 @@ namespace LibHac
|
||||
if (nca.Sections[index] == null) return;
|
||||
|
||||
NcaSection section = nca.Sections[index];
|
||||
Stream stream = nca.OpenSection(index, false, integrityCheckLevel);
|
||||
IStorage storage = nca.OpenSection(index, false, integrityCheckLevel, true);
|
||||
|
||||
switch (section.Type)
|
||||
{
|
||||
case SectionType.Invalid:
|
||||
break;
|
||||
case SectionType.Pfs0:
|
||||
var pfs0 = new Pfs(stream);
|
||||
var pfs0 = new Pfs(storage);
|
||||
pfs0.Extract(outputDir, logger);
|
||||
break;
|
||||
case SectionType.Romfs:
|
||||
var romfs = new Romfs(stream);
|
||||
var romfs = new Romfs(storage);
|
||||
romfs.Extract(outputDir, logger);
|
||||
break;
|
||||
case SectionType.Bktr:
|
||||
@ -498,7 +538,7 @@ namespace LibHac
|
||||
NcaHashType hashType = sect.Header.HashType;
|
||||
if (hashType != NcaHashType.Sha256 && hashType != NcaHashType.Ivfc) return Validity.Unchecked;
|
||||
|
||||
HierarchicalIntegrityVerificationStream stream = nca.OpenHashedSection(index, IntegrityCheckLevel.IgnoreOnInvalid);
|
||||
HierarchicalIntegrityVerificationStorage stream = nca.OpenHashedSection(index, IntegrityCheckLevel.IgnoreOnInvalid, true);
|
||||
if (stream == null) return Validity.Unchecked;
|
||||
|
||||
if (!quiet) logger?.LogMessage($"Verifying section {index}...");
|
||||
|
@ -1,5 +1,6 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -25,12 +26,22 @@ namespace LibHac
|
||||
|
||||
public NcaFsHeader[] FsHeaders = new NcaFsHeader[4];
|
||||
|
||||
public NcaHeader(BinaryReader reader)
|
||||
private byte[] SignatureData { get; }
|
||||
public Validity FixedSigValidity { get; }
|
||||
public Validity NpdmSigValidity { get; private set; }
|
||||
|
||||
public NcaHeader(BinaryReader reader, Keyset keyset)
|
||||
{
|
||||
Signature1 = reader.ReadBytes(0x100);
|
||||
Signature2 = reader.ReadBytes(0x100);
|
||||
Magic = reader.ReadAscii(4);
|
||||
if (Magic != "NCA3") throw new InvalidDataException("Not an NCA3 file");
|
||||
|
||||
reader.BaseStream.Position -= 4;
|
||||
SignatureData = reader.ReadBytes(0x200);
|
||||
FixedSigValidity = Crypto.Rsa2048PssVerify(SignatureData, Signature1, keyset.NcaHdrFixedKeyModulus);
|
||||
|
||||
reader.BaseStream.Position -= 0x200 - 4;
|
||||
Distribution = (DistributionType)reader.ReadByte();
|
||||
ContentType = (ContentType)reader.ReadByte();
|
||||
CryptoType = reader.ReadByte();
|
||||
@ -67,6 +78,11 @@ namespace LibHac
|
||||
FsHeaders[i] = new NcaFsHeader(reader);
|
||||
}
|
||||
}
|
||||
|
||||
internal void ValidateNpdmSignature(byte[] modulus)
|
||||
{
|
||||
NpdmSigValidity = Crypto.Rsa2048PssVerify(SignatureData, Signature2, modulus);
|
||||
}
|
||||
}
|
||||
|
||||
public class NcaSectionEntry
|
||||
@ -154,52 +170,6 @@ namespace LibHac
|
||||
public BktrHeader EncryptionHeader;
|
||||
}
|
||||
|
||||
public class IvfcHeader
|
||||
{
|
||||
public string Magic;
|
||||
public int Version;
|
||||
public int MasterHashSize;
|
||||
public int NumLevels;
|
||||
public IvfcLevelHeader[] LevelHeaders = new IvfcLevelHeader[6];
|
||||
public byte[] SaltSource;
|
||||
public byte[] MasterHash;
|
||||
|
||||
public IvfcHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
reader.BaseStream.Position += 2;
|
||||
Version = reader.ReadInt16();
|
||||
MasterHashSize = reader.ReadInt32();
|
||||
NumLevels = reader.ReadInt32();
|
||||
|
||||
for (int i = 0; i < LevelHeaders.Length; i++)
|
||||
{
|
||||
LevelHeaders[i] = new IvfcLevelHeader(reader);
|
||||
}
|
||||
|
||||
SaltSource = reader.ReadBytes(0x20);
|
||||
MasterHash = reader.ReadBytes(0x20);
|
||||
}
|
||||
}
|
||||
|
||||
public class IvfcLevelHeader
|
||||
{
|
||||
public long LogicalOffset;
|
||||
public long HashDataSize;
|
||||
public int BlockSizePower;
|
||||
public uint Reserved;
|
||||
|
||||
public Validity HashValidity = Validity.Unchecked;
|
||||
|
||||
public IvfcLevelHeader(BinaryReader reader)
|
||||
{
|
||||
LogicalOffset = reader.ReadInt64();
|
||||
HashDataSize = reader.ReadInt64();
|
||||
BlockSizePower = reader.ReadInt32();
|
||||
Reserved = reader.ReadUInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class Sha256Info
|
||||
{
|
||||
public byte[] MasterHash;
|
||||
@ -234,6 +204,8 @@ namespace LibHac
|
||||
public uint NumEntries;
|
||||
public uint Field1C;
|
||||
|
||||
public byte[] Header;
|
||||
|
||||
public BktrHeader(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
@ -242,6 +214,9 @@ namespace LibHac
|
||||
Version = reader.ReadUInt32();
|
||||
NumEntries = reader.ReadUInt32();
|
||||
Field1C = reader.ReadUInt32();
|
||||
|
||||
reader.BaseStream.Position -= 0x10;
|
||||
Header = reader.ReadBytes(0x10);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
// ReSharper disable UnusedVariable
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
// ReSharper disable UnusedVariable
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
@ -8,7 +9,7 @@ namespace LibHac.Npdm
|
||||
public string Magic;
|
||||
public byte[] Rsa2048Signature { get; }
|
||||
public byte[] Rsa2048Modulus { get; }
|
||||
public int Unknown1 { get; }
|
||||
public int Size { get; }
|
||||
public int Flags { get; }
|
||||
|
||||
public long TitleIdRangeMin { get; }
|
||||
@ -18,7 +19,9 @@ namespace LibHac.Npdm
|
||||
public ServiceAccessControl ServiceAccess { get; }
|
||||
public KernelAccessControl KernelAccess { get; }
|
||||
|
||||
public Acid(Stream stream, int offset)
|
||||
public Validity SignatureValidity { get; }
|
||||
|
||||
public Acid(Stream stream, int offset, Keyset keyset)
|
||||
{
|
||||
stream.Seek(offset, SeekOrigin.Begin);
|
||||
|
||||
@ -33,9 +36,13 @@ namespace LibHac.Npdm
|
||||
throw new Exception("ACID Stream doesn't contain ACID section!");
|
||||
}
|
||||
|
||||
//Size field used with the above signature (?).
|
||||
Unknown1 = reader.ReadInt32();
|
||||
Size = reader.ReadInt32();
|
||||
|
||||
reader.BaseStream.Position = offset + 0x100;
|
||||
byte[] signatureData = reader.ReadBytes(Size);
|
||||
SignatureValidity = Crypto.Rsa2048PssVerify(signatureData, Rsa2048Signature, keyset.AcidFixedKeyModulus);
|
||||
|
||||
reader.BaseStream.Position = offset + 0x208;
|
||||
reader.ReadInt32();
|
||||
|
||||
//Bit0 must be 1 on retail, on devunit 0 is also allowed. Bit1 is unknown.
|
||||
|
@ -1,4 +1,5 @@
|
||||
using System;
|
||||
// ReSharper disable UnusedVariable
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Npdm
|
||||
|
@ -1,3 +1,4 @@
|
||||
// ReSharper disable InconsistentNaming
|
||||
namespace LibHac.Npdm
|
||||
{
|
||||
public enum FsPermissionBool : ulong
|
||||
|
@ -1,3 +1,4 @@
|
||||
// ReSharper disable InconsistentNaming
|
||||
namespace LibHac.Npdm
|
||||
{
|
||||
public enum FsPermissionRw : ulong
|
||||
|
@ -66,7 +66,7 @@ namespace LibHac.Npdm
|
||||
|
||||
for (int sysCall = 0; sysCall < 0x18 && sysCallBase + sysCall < 0x80; sysCall++)
|
||||
{
|
||||
Items[index].AllowedSvcs[sysCallBase + sysCall] = (descriptor & 1) != 0;
|
||||
items[index].AllowedSvcs[sysCallBase + sysCall] = (descriptor & 1) != 0;
|
||||
|
||||
descriptor >>= 1;
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
// ReSharper disable UnusedVariable
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
@ -7,7 +8,7 @@ namespace LibHac.Npdm
|
||||
//https://github.com/SciresM/hactool/blob/master/npdm.c
|
||||
//https://github.com/SciresM/hactool/blob/master/npdm.h
|
||||
//http://switchbrew.org/index.php?title=NPDM
|
||||
public class Npdm
|
||||
public class NpdmBinary
|
||||
{
|
||||
public string Magic;
|
||||
public bool Is64Bits { get; }
|
||||
@ -23,7 +24,7 @@ namespace LibHac.Npdm
|
||||
public Aci0 Aci0 { get; }
|
||||
public Acid AciD { get; }
|
||||
|
||||
public Npdm(Stream stream)
|
||||
public NpdmBinary(Stream stream, Keyset keyset)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
|
||||
@ -70,7 +71,7 @@ namespace LibHac.Npdm
|
||||
int acidSize = reader.ReadInt32();
|
||||
|
||||
Aci0 = new Aci0(stream, aci0Offset);
|
||||
AciD = new Acid(stream, acidOffset);
|
||||
AciD = new Acid(stream, acidOffset, keyset);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
using System.Collections;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -11,20 +11,20 @@ namespace LibHac
|
||||
public uint BssSize { get; }
|
||||
public byte[] BuildId { get; } = new byte[0x20];
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Nso(Stream stream)
|
||||
public Nso(IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
var reader = new BinaryReader(StreamSource.CreateStream());
|
||||
Storage = storage;
|
||||
var reader = new BinaryReader(Storage.AsStream());
|
||||
if (reader.ReadAscii(4) != "NSO0")
|
||||
throw new InvalidDataException("NSO magic is incorrect!");
|
||||
reader.ReadUInt32(); // Version
|
||||
reader.ReadUInt32(); // Reserved/Unused
|
||||
var flags = new BitArray(new[] { (int)reader.ReadUInt32() });
|
||||
var textSection = new NsoSection(StreamSource);
|
||||
var rodataSection = new NsoSection(StreamSource);
|
||||
var dataSection = new NsoSection(StreamSource);
|
||||
var textSection = new NsoSection(Storage);
|
||||
var rodataSection = new NsoSection(Storage);
|
||||
var dataSection = new NsoSection(Storage);
|
||||
textSection.IsCompressed = flags[0];
|
||||
rodataSection.IsCompressed = flags[1];
|
||||
dataSection.IsCompressed = flags[2];
|
||||
@ -58,7 +58,7 @@ namespace LibHac
|
||||
|
||||
public class NsoSection
|
||||
{
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public bool IsCompressed { get; set; }
|
||||
public bool CheckHash { get; set; }
|
||||
@ -69,20 +69,20 @@ namespace LibHac
|
||||
|
||||
public byte[] Hash { get; } = new byte[0x20];
|
||||
|
||||
public NsoSection(SharedStreamSource streamSource)
|
||||
public NsoSection(IStorage storage)
|
||||
{
|
||||
StreamSource = streamSource;
|
||||
Storage = storage;
|
||||
}
|
||||
|
||||
public Stream OpenSection()
|
||||
public IStorage OpenSection()
|
||||
{
|
||||
return StreamSource.CreateStream(FileOffset, CompressedSize);
|
||||
return Storage.Slice(FileOffset, CompressedSize);
|
||||
}
|
||||
|
||||
public byte[] DecompressSection()
|
||||
{
|
||||
var compressed = new byte[CompressedSize];
|
||||
OpenSection().Read(compressed, 0, (int)CompressedSize);
|
||||
OpenSection().Read(compressed, 0);
|
||||
|
||||
if (IsCompressed)
|
||||
return Lz4.Decompress(compressed, (int)DecompressedSize);
|
||||
|
@ -1,6 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -16,12 +16,12 @@ namespace LibHac
|
||||
public int KeyRevision { get; }
|
||||
public Pk11 Pk11 { get; }
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Package1(Keyset keyset, Stream stream)
|
||||
public Package1(Keyset keyset, IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
var reader = new BinaryReader(stream);
|
||||
Storage = storage;
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
BuildHash = reader.ReadBytes(0x10);
|
||||
BuildDate = reader.ReadAscii(0xE);
|
||||
@ -34,20 +34,19 @@ namespace LibHac
|
||||
Counter = reader.ReadBytes(0x10);
|
||||
|
||||
// Try decrypting the PK11 blob with all known package1 keys
|
||||
Stream encStream = StreamSource.CreateStream(0x4000, Pk11Size);
|
||||
IStorage encStorage = Storage.Slice(0x4000, Pk11Size);
|
||||
var decBuffer = new byte[0x10];
|
||||
|
||||
for (int i = 0; i < 0x20; i++)
|
||||
{
|
||||
var dec = new Aes128CtrStream(encStream, keyset.Package1Keys[i], Counter);
|
||||
dec.Read(decBuffer, 0, 0x10);
|
||||
var dec = new Aes128CtrStorage(encStorage, keyset.Package1Keys[i], Counter, true);
|
||||
dec.Read(decBuffer, 0);
|
||||
|
||||
if (BitConverter.ToUInt32(decBuffer, 0) == Pk11Magic)
|
||||
{
|
||||
KeyRevision = i;
|
||||
|
||||
dec.Position = 0;
|
||||
Pk11 = new Pk11(new RandomAccessSectorStream(dec));
|
||||
Pk11 = new Pk11(new CachedStorage(dec, 4, true));
|
||||
|
||||
return;
|
||||
}
|
||||
@ -56,7 +55,14 @@ namespace LibHac
|
||||
throw new InvalidDataException("Failed to decrypt PK11! Is the correct key present?");
|
||||
}
|
||||
|
||||
public Stream OpenPackage1Ldr() => StreamSource.CreateStream(0, 0x4000);
|
||||
public IStorage OpenDecryptedPackage()
|
||||
{
|
||||
IStorage[] storages = { OpenPackage1Ldr(), Pk11.OpenDecryptedPk11() };
|
||||
|
||||
return new ConcatenationStorage(storages, true);
|
||||
}
|
||||
|
||||
public IStorage OpenPackage1Ldr() => Storage.Slice(0, 0x4000);
|
||||
}
|
||||
|
||||
public class Pk11
|
||||
@ -67,12 +73,12 @@ namespace LibHac
|
||||
public int[] SectionSizes { get; } = new int[3];
|
||||
public int[] SectionOffsets { get; } = new int[3];
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Pk11(Stream stream)
|
||||
public Pk11(IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
var reader = new BinaryReader(stream);
|
||||
Storage = storage;
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
|
||||
Magic = reader.ReadAscii(4);
|
||||
SectionSizes[0] = reader.ReadInt32();
|
||||
@ -89,21 +95,21 @@ namespace LibHac
|
||||
SectionOffsets[2] = SectionOffsets[1] + SectionSizes[1];
|
||||
}
|
||||
|
||||
public Stream OpenSection(int index)
|
||||
public IStorage OpenSection(int index)
|
||||
{
|
||||
if (index < 0 || index > 2)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(index), "Section index must be one of: 0, 1, 2");
|
||||
}
|
||||
|
||||
return StreamSource.CreateStream(SectionOffsets[index], SectionSizes[index]);
|
||||
return Storage.Slice(SectionOffsets[index], SectionSizes[index]);
|
||||
}
|
||||
|
||||
public Stream OpenDecryptedPk11() => StreamSource.CreateStream();
|
||||
public IStorage OpenDecryptedPk11() => Storage;
|
||||
|
||||
public Stream OpenWarmboot() => OpenSection(GetWarmbootSection());
|
||||
public Stream OpenNxBootloader() => OpenSection(GetNxBootloaderSection());
|
||||
public Stream OpenSecureMonitor() => OpenSection(GetSecureMonitorSection());
|
||||
public IStorage OpenWarmboot() => OpenSection(GetWarmbootSection());
|
||||
public IStorage OpenNxBootloader() => OpenSection(GetNxBootloaderSection());
|
||||
public IStorage OpenSecureMonitor() => OpenSection(GetSecureMonitorSection());
|
||||
|
||||
// todo: Handle the old layout from before 2.0.0
|
||||
private int GetWarmbootSection() => 0;
|
||||
|
@ -1,6 +1,6 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -14,17 +14,17 @@ namespace LibHac
|
||||
public int PackageSize { get; }
|
||||
public int HeaderVersion { get; }
|
||||
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage Storage { get; }
|
||||
|
||||
public Package2(Keyset keyset, Stream stream)
|
||||
public Package2(Keyset keyset, IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
SharedStream headerStream = StreamSource.CreateStream(0, 0x200);
|
||||
Storage = storage;
|
||||
IStorage headerStorage = Storage.Slice(0, 0x200);
|
||||
|
||||
KeyRevision = FindKeyGeneration(keyset, headerStream);
|
||||
KeyRevision = FindKeyGeneration(keyset, headerStorage);
|
||||
Key = keyset.Package2Keys[KeyRevision];
|
||||
|
||||
Header = new Package2Header(headerStream, Key);
|
||||
Header = new Package2Header(headerStorage, keyset, KeyRevision);
|
||||
|
||||
PackageSize = BitConverter.ToInt32(Header.Counter, 0) ^ BitConverter.ToInt32(Header.Counter, 8) ^
|
||||
BitConverter.ToInt32(Header.Counter, 12);
|
||||
@ -37,14 +37,21 @@ namespace LibHac
|
||||
}
|
||||
}
|
||||
|
||||
public Stream OpenHeaderPart1()
|
||||
public IStorage OpenDecryptedPackage()
|
||||
{
|
||||
return StreamSource.CreateStream(0, 0x110);
|
||||
IStorage[] storages = { OpenHeaderPart1(), OpenHeaderPart2(), OpenKernel(), OpenIni1() };
|
||||
|
||||
return new ConcatenationStorage(storages, true);
|
||||
}
|
||||
|
||||
public Stream OpenHeaderPart2()
|
||||
private IStorage OpenHeaderPart1()
|
||||
{
|
||||
SharedStream encStream = StreamSource.CreateStream(0x110, 0xF0);
|
||||
return Storage.Slice(0, 0x110);
|
||||
}
|
||||
|
||||
private IStorage OpenHeaderPart2()
|
||||
{
|
||||
IStorage encStorage = Storage.Slice(0x110, 0xF0);
|
||||
|
||||
// The counter starts counting at 0x100, but the block at 0x100 isn't encrypted.
|
||||
// Increase the counter by one and start decrypting at 0x110.
|
||||
@ -52,42 +59,39 @@ namespace LibHac
|
||||
Array.Copy(Header.Counter, counter, 0x10);
|
||||
Util.IncrementByteArray(counter);
|
||||
|
||||
return new RandomAccessSectorStream(new Aes128CtrStream(encStream, Key, counter));
|
||||
return new CachedStorage(new Aes128CtrStorage(encStorage, Key, counter, true), 0x4000, 4, true);
|
||||
}
|
||||
|
||||
public Stream OpenKernel()
|
||||
public IStorage OpenKernel()
|
||||
{
|
||||
int offset = 0x200;
|
||||
SharedStream encStream = StreamSource.CreateStream(offset, Header.SectionSizes[0]);
|
||||
IStorage encStorage = Storage.Slice(offset, Header.SectionSizes[0]);
|
||||
|
||||
return new RandomAccessSectorStream(new Aes128CtrStream(encStream, Key, Header.SectionCounters[0]));
|
||||
return new CachedStorage(new Aes128CtrStorage(encStorage, Key, Header.SectionCounters[0], true), 0x4000, 4, true);
|
||||
}
|
||||
|
||||
public Stream OpenIni1()
|
||||
public IStorage OpenIni1()
|
||||
{
|
||||
int offset = 0x200 + Header.SectionSizes[0];
|
||||
SharedStream encStream = StreamSource.CreateStream(offset, Header.SectionSizes[1]);
|
||||
IStorage encStorage = Storage.Slice(offset, Header.SectionSizes[1]);
|
||||
|
||||
return new RandomAccessSectorStream(new Aes128CtrStream(encStream, Key, Header.SectionCounters[1]));
|
||||
return new CachedStorage(new Aes128CtrStorage(encStorage, Key, Header.SectionCounters[1], true), 0x4000, 4, true);
|
||||
}
|
||||
|
||||
private int FindKeyGeneration(Keyset keyset, Stream stream)
|
||||
private int FindKeyGeneration(Keyset keyset, IStorage storage)
|
||||
{
|
||||
var counter = new byte[0x10];
|
||||
var decBuffer = new byte[0x10];
|
||||
|
||||
stream.Position = 0x100;
|
||||
stream.Read(counter, 0, 0x10);
|
||||
storage.Read(counter, 0x100);
|
||||
|
||||
for (int i = 0; i < 0x20; i++)
|
||||
{
|
||||
var dec = new Aes128CtrStream(stream, keyset.Package2Keys[i], 0x100, 0x100, counter);
|
||||
dec.Position = 0x50;
|
||||
dec.Read(decBuffer, 0, 0x10);
|
||||
var dec = new Aes128CtrStorage(storage.Slice(0x100), keyset.Package2Keys[i], counter, false);
|
||||
dec.Read(decBuffer, 0x50);
|
||||
|
||||
if (BitConverter.ToUInt32(decBuffer, 0) == Pk21Magic)
|
||||
{
|
||||
stream.Position = 0;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
@ -111,14 +115,21 @@ namespace LibHac
|
||||
public int VersionMax { get; }
|
||||
public int VersionMin { get; }
|
||||
|
||||
public Package2Header(Stream stream, byte[] key)
|
||||
public Validity SignatureValidity { get; }
|
||||
|
||||
public Package2Header(IStorage storage, Keyset keyset, int keyGeneration)
|
||||
{
|
||||
var reader = new BinaryReader(stream);
|
||||
var reader = new BinaryReader(storage.AsStream());
|
||||
byte[] key = keyset.Package2Keys[keyGeneration];
|
||||
|
||||
Signature = reader.ReadBytes(0x100);
|
||||
byte[] sigData = reader.ReadBytes(0x100);
|
||||
SignatureValidity = Crypto.Rsa2048PssVerify(sigData, Signature, keyset.Package2FixedKeyModulus);
|
||||
|
||||
reader.BaseStream.Position -= 0x100;
|
||||
Counter = reader.ReadBytes(0x10);
|
||||
|
||||
var headerStream = new RandomAccessSectorStream(new Aes128CtrStream(stream, key, 0x100, 0x100, Counter));
|
||||
Stream headerStream = new CachedStorage(new Aes128CtrStorage(storage.Slice(0x100), key, Counter, true), 0x4000, 4, true).AsStream();
|
||||
|
||||
headerStream.Position = 0x10;
|
||||
reader = new BinaryReader(headerStream);
|
||||
|
@ -3,7 +3,7 @@ using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -14,11 +14,11 @@ namespace LibHac
|
||||
public PfsFileEntry[] Files { get; }
|
||||
|
||||
private Dictionary<string, PfsFileEntry> FileDict { get; }
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage BaseStorage { get; }
|
||||
|
||||
public Pfs(Stream stream)
|
||||
public Pfs(IStorage storage)
|
||||
{
|
||||
using (var reader = new BinaryReader(stream, Encoding.Default, true))
|
||||
using (var reader = new BinaryReader(storage.AsStream(), Encoding.Default, true))
|
||||
{
|
||||
Header = new PfsHeader(reader);
|
||||
}
|
||||
@ -26,10 +26,10 @@ namespace LibHac
|
||||
HeaderSize = Header.HeaderSize;
|
||||
Files = Header.Files;
|
||||
FileDict = Header.Files.ToDictionary(x => x.Name, x => x);
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
BaseStorage = storage;
|
||||
}
|
||||
|
||||
public Stream OpenFile(string filename)
|
||||
public IStorage OpenFile(string filename)
|
||||
{
|
||||
if (!FileDict.TryGetValue(filename, out PfsFileEntry file))
|
||||
{
|
||||
@ -39,21 +39,21 @@ namespace LibHac
|
||||
return OpenFile(file);
|
||||
}
|
||||
|
||||
public bool TryOpenFile(string filename, out Stream stream)
|
||||
public bool TryOpenFile(string filename, out IStorage storage)
|
||||
{
|
||||
if (!FileDict.TryGetValue(filename, out PfsFileEntry file))
|
||||
{
|
||||
stream = null;
|
||||
storage = null;
|
||||
return false;
|
||||
}
|
||||
|
||||
stream = OpenFile(file);
|
||||
storage = OpenFile(file);
|
||||
return true;
|
||||
}
|
||||
|
||||
public Stream OpenFile(PfsFileEntry file)
|
||||
public IStorage OpenFile(PfsFileEntry file)
|
||||
{
|
||||
return StreamSource.CreateStream(HeaderSize + file.Offset, file.Size);
|
||||
return BaseStorage.Slice(HeaderSize + file.Offset, file.Size);
|
||||
}
|
||||
|
||||
public bool FileExists(string filename)
|
||||
@ -175,7 +175,7 @@ namespace LibHac
|
||||
{
|
||||
foreach (PfsFileEntry file in pfs.Header.Files)
|
||||
{
|
||||
Stream stream = pfs.OpenFile(file);
|
||||
IStorage storage = pfs.OpenFile(file);
|
||||
string outName = Path.Combine(outDir, file.Name);
|
||||
string dir = Path.GetDirectoryName(outName);
|
||||
if (!string.IsNullOrWhiteSpace(dir)) Directory.CreateDirectory(dir);
|
||||
@ -183,7 +183,7 @@ namespace LibHac
|
||||
using (var outFile = new FileStream(outName, FileMode.Create, FileAccess.ReadWrite))
|
||||
{
|
||||
logger?.LogMessage(file.Name);
|
||||
stream.CopyStream(outFile, stream.Length, logger);
|
||||
storage.CopyToStream(outFile, storage.Length, logger);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,7 +2,7 @@
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -15,15 +15,15 @@ namespace LibHac
|
||||
public RomfsDir RootDir { get; }
|
||||
|
||||
public Dictionary<string, RomfsFile> FileDict { get; }
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private IStorage BaseStorage { get; }
|
||||
|
||||
public Romfs(Stream stream)
|
||||
public Romfs(IStorage storage)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(stream);
|
||||
BaseStorage = storage;
|
||||
|
||||
byte[] dirMetaTable;
|
||||
byte[] fileMetaTable;
|
||||
using (var reader = new BinaryReader(StreamSource.CreateStream(), Encoding.Default, true))
|
||||
using (var reader = new BinaryReader(BaseStorage.AsStream(), Encoding.Default, true))
|
||||
{
|
||||
Header = new RomfsHeader(reader);
|
||||
reader.BaseStream.Position = Header.DirMetaTableOffset;
|
||||
@ -61,7 +61,7 @@ namespace LibHac
|
||||
FileDict = Files.ToDictionary(x => x.FullPath, x => x);
|
||||
}
|
||||
|
||||
public Stream OpenFile(string filename)
|
||||
public IStorage OpenFile(string filename)
|
||||
{
|
||||
if (!FileDict.TryGetValue(filename, out RomfsFile file))
|
||||
{
|
||||
@ -71,26 +71,24 @@ namespace LibHac
|
||||
return OpenFile(file);
|
||||
}
|
||||
|
||||
public Stream OpenFile(RomfsFile file)
|
||||
public IStorage OpenFile(RomfsFile file)
|
||||
{
|
||||
return StreamSource.CreateStream(Header.DataOffset + file.DataOffset, file.DataLength);
|
||||
return BaseStorage.Slice(Header.DataOffset + file.DataOffset, file.DataLength);
|
||||
}
|
||||
|
||||
public byte[] GetFile(string filename)
|
||||
{
|
||||
Stream stream = OpenFile(filename);
|
||||
var file = new byte[stream.Length];
|
||||
using (var ms = new MemoryStream(file))
|
||||
{
|
||||
stream.CopyTo(ms);
|
||||
}
|
||||
IStorage storage = OpenFile(filename);
|
||||
var file = new byte[storage.Length];
|
||||
|
||||
storage.Read(file, 0);
|
||||
|
||||
return file;
|
||||
}
|
||||
|
||||
public bool FileExists(string filename) => FileDict.ContainsKey(filename);
|
||||
|
||||
public Stream OpenRawStream() => StreamSource.CreateStream();
|
||||
public IStorage OpenRawStream() => BaseStorage.Slice(0);
|
||||
|
||||
private void SetReferences()
|
||||
{
|
||||
@ -149,7 +147,7 @@ namespace LibHac
|
||||
{
|
||||
foreach (RomfsFile file in romfs.Files)
|
||||
{
|
||||
Stream stream = romfs.OpenFile(file);
|
||||
IStorage storage = romfs.OpenFile(file);
|
||||
string outName = outDir + file.FullPath;
|
||||
string dir = Path.GetDirectoryName(outName);
|
||||
if (!string.IsNullOrWhiteSpace(dir)) Directory.CreateDirectory(dir);
|
||||
@ -157,7 +155,7 @@ namespace LibHac
|
||||
using (var outFile = new FileStream(outName, FileMode.Create, FileAccess.ReadWrite))
|
||||
{
|
||||
logger?.LogMessage(file.FullPath);
|
||||
stream.CopyStream(outFile, stream.Length, logger);
|
||||
storage.CopyToStream(outFile, storage.Length, logger);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,52 +0,0 @@
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class AllocationTable
|
||||
{
|
||||
public AllocationTableEntry[] Entries { get; }
|
||||
|
||||
public AllocationTable(Stream tableStream)
|
||||
{
|
||||
int blockCount = (int)(tableStream.Length / 8);
|
||||
|
||||
Entries = new AllocationTableEntry[blockCount];
|
||||
tableStream.Position = 0;
|
||||
var reader = new BinaryReader(tableStream);
|
||||
|
||||
for (int i = 0; i < blockCount; i++)
|
||||
{
|
||||
int parent = reader.ReadInt32();
|
||||
int child = reader.ReadInt32();
|
||||
|
||||
Entries[i] = new AllocationTableEntry { Next = child, Prev = parent };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class AllocationTableEntry
|
||||
{
|
||||
public int Prev { get; set; }
|
||||
public int Next { get; set; }
|
||||
|
||||
public bool IsListStart()
|
||||
{
|
||||
return Prev == int.MinValue;
|
||||
}
|
||||
|
||||
public bool IsListEnd()
|
||||
{
|
||||
return (Next & 0x7FFFFFFF) == 0;
|
||||
}
|
||||
|
||||
public bool IsMultiBlockSegment()
|
||||
{
|
||||
return Next < 0;
|
||||
}
|
||||
|
||||
public bool IsSingleBlockSegment()
|
||||
{
|
||||
return Next >= 0;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,135 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class AllocationTableStream : Stream
|
||||
{
|
||||
private int SegmentPos => (int)(Data.Position - (Iterator.PhysicalBlock * BlockSize));
|
||||
|
||||
private Stream Data { get; }
|
||||
private int BlockSize { get; }
|
||||
private AllocationTableIterator Iterator { get; }
|
||||
|
||||
public AllocationTableStream(Stream data, AllocationTable table, int blockSize, int initialBlock, long length)
|
||||
{
|
||||
Data = data;
|
||||
BlockSize = blockSize;
|
||||
Length = length;
|
||||
Iterator = new AllocationTableIterator(table, initialBlock);
|
||||
Data.Position = Iterator.PhysicalBlock * BlockSize;
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
Data.Flush();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int remaining = count;
|
||||
int outOffset = offset;
|
||||
int totalBytesRead = 0;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int remainingInSegment = Iterator.CurrentSegmentSize * BlockSize - SegmentPos;
|
||||
int bytesToRead = Math.Min(remaining, remainingInSegment);
|
||||
int bytesRead = Data.Read(buffer, outOffset, bytesToRead);
|
||||
|
||||
outOffset += bytesRead;
|
||||
totalBytesRead += bytesRead;
|
||||
remaining -= bytesRead;
|
||||
|
||||
if (SegmentPos >= Iterator.CurrentSegmentSize * BlockSize)
|
||||
{
|
||||
if (!Iterator.MoveNext()) return totalBytesRead;
|
||||
Data.Position = Iterator.PhysicalBlock * BlockSize;
|
||||
}
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int remaining = count;
|
||||
int outOffset = offset;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int remainingInSegment = Iterator.CurrentSegmentSize * BlockSize - SegmentPos;
|
||||
int bytesToWrite = Math.Min(remaining, remainingInSegment);
|
||||
Data.Write(buffer, outOffset, bytesToWrite);
|
||||
|
||||
outOffset += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
|
||||
if (SegmentPos >= Iterator.CurrentSegmentSize * BlockSize)
|
||||
{
|
||||
if (!Iterator.MoveNext()) return;
|
||||
Data.Position = Iterator.PhysicalBlock * BlockSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => true;
|
||||
public override bool CanWrite => true;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => Iterator.VirtualBlock * BlockSize + (Data.Position - (Iterator.PhysicalBlock * BlockSize));
|
||||
set
|
||||
{
|
||||
long blockIndex = value / BlockSize;
|
||||
|
||||
while (Iterator.VirtualBlock > blockIndex ||
|
||||
Iterator.VirtualBlock + Iterator.CurrentSegmentSize <= blockIndex)
|
||||
{
|
||||
if (Iterator.VirtualBlock > blockIndex)
|
||||
{
|
||||
Iterator.MovePrevious();
|
||||
}
|
||||
else
|
||||
{
|
||||
Iterator.MoveNext();
|
||||
}
|
||||
}
|
||||
|
||||
long segmentPos = value - (Iterator.VirtualBlock * BlockSize);
|
||||
Data.Position = Iterator.PhysicalBlock * BlockSize + segmentPos;
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
Flush();
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,115 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class DuplexFs : Stream
|
||||
{
|
||||
private int BlockSize{ get; }
|
||||
private Stream BitmapStream { get; }
|
||||
private Stream DataA { get; }
|
||||
private Stream DataB { get; }
|
||||
private DuplexBitmap Bitmap { get; }
|
||||
|
||||
public DuplexFs(Stream bitmap, Stream dataA, Stream dataB, int blockSize)
|
||||
{
|
||||
if (dataA.Length != dataB.Length)
|
||||
{
|
||||
throw new InvalidDataException("Both data streams must be the same length");
|
||||
}
|
||||
|
||||
BlockSize = blockSize;
|
||||
BitmapStream = bitmap;
|
||||
DataA = dataA;
|
||||
DataB = dataB;
|
||||
Bitmap = new DuplexBitmap(BitmapStream, (int)(bitmap.Length * 8));
|
||||
Length = dataA.Length;
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BitmapStream?.Flush();
|
||||
DataA?.Flush();
|
||||
DataB?.Flush();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Math.Min(count, Length - Position);
|
||||
if (remaining <= 0) return 0;
|
||||
int outOffset = offset;
|
||||
int totalBytesRead = 0;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(Position / BlockSize);
|
||||
int blockPos = (int)(Position % BlockSize);
|
||||
int bytesToRead = (int)Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
Stream data = Bitmap.Bitmap[blockNum] ? DataB : DataA;
|
||||
data.Position = blockNum * BlockSize + blockPos;
|
||||
|
||||
data.Read(buffer, outOffset, bytesToRead);
|
||||
outOffset += bytesToRead;
|
||||
totalBytesRead += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
Position += bytesToRead;
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Math.Min(count, Length - Position);
|
||||
if (remaining <= 0) return;
|
||||
|
||||
int inOffset = offset;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int blockNum = (int)(Position / BlockSize);
|
||||
int blockPos = (int)(Position % BlockSize);
|
||||
int bytesToWrite = (int)Math.Min(remaining, BlockSize - blockPos);
|
||||
|
||||
Stream data = Bitmap.Bitmap[blockNum] ? DataB : DataA;
|
||||
data.Position = blockNum * BlockSize + blockPos;
|
||||
|
||||
data.Write(buffer, inOffset, bytesToWrite);
|
||||
|
||||
inOffset += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
Position += bytesToWrite;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => true;
|
||||
public override bool CanWrite => false;
|
||||
public override long Length { get; }
|
||||
public override long Position { get; set; }
|
||||
}
|
||||
}
|
@ -1,398 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class Header
|
||||
{
|
||||
public byte[] Cmac { get; set; }
|
||||
public FsLayout Layout { get; set; }
|
||||
public JournalHeader Journal { get; set; }
|
||||
public DuplexHeader Duplex { get; set; }
|
||||
public IvfcHeader Ivfc { get; set; }
|
||||
public SaveHeader Save { get; set; }
|
||||
|
||||
public RemapHeader FileRemap { get; set; }
|
||||
public RemapHeader MetaRemap { get; set; }
|
||||
public ExtraData ExtraData { get; set; }
|
||||
|
||||
public MapEntry[] FileMapEntries { get; set; }
|
||||
public MapEntry[] MetaMapEntries { get; set; }
|
||||
|
||||
public byte[] MasterHashA { get; }
|
||||
public byte[] MasterHashB { get; }
|
||||
public byte[] DuplexMasterA { get; }
|
||||
public byte[] DuplexMasterB { get; }
|
||||
|
||||
public Stream MasterHash { get; }
|
||||
|
||||
public Validity SignatureValidity { get; }
|
||||
public Validity HeaderHashValidity { get; }
|
||||
|
||||
public byte[] Data { get; }
|
||||
|
||||
public Header(Keyset keyset, SharedStreamSource streamSource)
|
||||
{
|
||||
var reader = new BinaryReader(streamSource.CreateStream());
|
||||
|
||||
reader.BaseStream.Position = 0;
|
||||
Data = reader.ReadBytes(0x4000);
|
||||
reader.BaseStream.Position = 0;
|
||||
|
||||
Cmac = reader.ReadBytes(0x10);
|
||||
|
||||
reader.BaseStream.Position = 0x100;
|
||||
Layout = new FsLayout(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x300;
|
||||
Duplex = new DuplexHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x344;
|
||||
Ivfc = new IvfcHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x408;
|
||||
Journal = new JournalHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x608;
|
||||
Save = new SaveHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x650;
|
||||
FileRemap = new RemapHeader(reader);
|
||||
reader.BaseStream.Position = 0x690;
|
||||
MetaRemap = new RemapHeader(reader);
|
||||
|
||||
reader.BaseStream.Position = 0x6D8;
|
||||
ExtraData = new ExtraData(reader);
|
||||
|
||||
reader.BaseStream.Position = Layout.IvfcMasterHashOffsetA;
|
||||
MasterHashA = reader.ReadBytes((int)Layout.IvfcMasterHashSize);
|
||||
reader.BaseStream.Position = Layout.IvfcMasterHashOffsetB;
|
||||
MasterHashB = reader.ReadBytes((int)Layout.IvfcMasterHashSize);
|
||||
|
||||
MasterHash = streamSource.CreateStream(Layout.IvfcMasterHashOffsetA, Layout.IvfcMasterHashSize);
|
||||
|
||||
reader.BaseStream.Position = Layout.DuplexMasterOffsetA;
|
||||
DuplexMasterA = reader.ReadBytes((int)Layout.DuplexMasterSize);
|
||||
reader.BaseStream.Position = Layout.DuplexMasterOffsetB;
|
||||
DuplexMasterB = reader.ReadBytes((int)Layout.DuplexMasterSize);
|
||||
|
||||
reader.BaseStream.Position = Layout.FileMapEntryOffset;
|
||||
FileMapEntries = new MapEntry[FileRemap.MapEntryCount];
|
||||
for (int i = 0; i < FileRemap.MapEntryCount; i++)
|
||||
{
|
||||
FileMapEntries[i] = new MapEntry(reader);
|
||||
}
|
||||
|
||||
reader.BaseStream.Position = Layout.MetaMapEntryOffset;
|
||||
MetaMapEntries = new MapEntry[MetaRemap.MapEntryCount];
|
||||
for (int i = 0; i < MetaRemap.MapEntryCount; i++)
|
||||
{
|
||||
MetaMapEntries[i] = new MapEntry(reader);
|
||||
}
|
||||
|
||||
HeaderHashValidity = Crypto.CheckMemoryHashTable(Data, Layout.Hash, 0x300, 0x3d00);
|
||||
SignatureValidity = ValidateSignature(keyset);
|
||||
}
|
||||
|
||||
private Validity ValidateSignature(Keyset keyset)
|
||||
{
|
||||
var calculatedCmac = new byte[0x10];
|
||||
|
||||
Crypto.CalculateAesCmac(keyset.SaveMacKey, Data, 0x100, calculatedCmac, 0, 0x200);
|
||||
|
||||
return Util.ArraysEqual(calculatedCmac, Cmac) ? Validity.Valid : Validity.Invalid;
|
||||
}
|
||||
}
|
||||
|
||||
public class FsLayout
|
||||
{
|
||||
public string Magic { get; set; }
|
||||
public uint MagicNum { get; set; }
|
||||
public byte[] Hash { get; set; }
|
||||
public long FileMapEntryOffset { get; set; }
|
||||
public long FileMapEntrySize { get; set; }
|
||||
public long MetaMapEntryOffset { get; set; }
|
||||
public long MetaMapEntrySize { get; set; }
|
||||
public long FileMapDataOffset { get; set; }
|
||||
public long FileMapDataSize { get; set; }
|
||||
public long DuplexL1OffsetA { get; set; }
|
||||
public long DuplexL1OffsetB { get; set; }
|
||||
public long DuplexL1Size { get; set; }
|
||||
public long DuplexDataOffsetA { get; set; }
|
||||
public long DuplexDataOffsetB { get; set; }
|
||||
public long DuplexDataSize { get; set; }
|
||||
public long JournalDataOffset { get; set; }
|
||||
public long JournalDataSizeA { get; set; }
|
||||
public long JournalDataSizeB { get; set; }
|
||||
public long SizeReservedArea { get; set; }
|
||||
public long DuplexMasterOffsetA { get; set; }
|
||||
public long DuplexMasterOffsetB { get; set; }
|
||||
public long DuplexMasterSize { get; set; }
|
||||
public long IvfcMasterHashOffsetA { get; set; }
|
||||
public long IvfcMasterHashOffsetB { get; set; }
|
||||
public long IvfcMasterHashSize { get; set; }
|
||||
public long JournalTableOffset { get; set; }
|
||||
public long JournalTableSize { get; set; }
|
||||
public long JournalBitmapUpdatedPhysicalOffset { get; set; }
|
||||
public long JournalBitmapUpdatedPhysicalSize { get; set; }
|
||||
public long JournalBitmapUpdatedVirtualOffset { get; set; }
|
||||
public long JournalBitmapUpdatedVirtualSize { get; set; }
|
||||
public long JournalBitmapUnassignedOffset { get; set; }
|
||||
public long JournalBitmapUnassignedSize { get; set; }
|
||||
public long IvfcL1Offset { get; set; }
|
||||
public long IvfcL1Size { get; set; }
|
||||
public long IvfcL2Offset { get; set; }
|
||||
public long IvfcL2Size { get; set; }
|
||||
public long IvfcL3Offset { get; set; }
|
||||
public long IvfcL3Size { get; set; }
|
||||
public long FatOffset { get; set; }
|
||||
public long FatSize { get; set; }
|
||||
public long DuplexIndex { get; set; }
|
||||
|
||||
public FsLayout(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
MagicNum = reader.ReadUInt32();
|
||||
Hash = reader.ReadBytes(0x20);
|
||||
FileMapEntryOffset = reader.ReadInt64();
|
||||
FileMapEntrySize = reader.ReadInt64();
|
||||
MetaMapEntryOffset = reader.ReadInt64();
|
||||
MetaMapEntrySize = reader.ReadInt64();
|
||||
FileMapDataOffset = reader.ReadInt64();
|
||||
FileMapDataSize = reader.ReadInt64();
|
||||
DuplexL1OffsetA = reader.ReadInt64();
|
||||
DuplexL1OffsetB = reader.ReadInt64();
|
||||
DuplexL1Size = reader.ReadInt64();
|
||||
DuplexDataOffsetA = reader.ReadInt64();
|
||||
DuplexDataOffsetB = reader.ReadInt64();
|
||||
DuplexDataSize = reader.ReadInt64();
|
||||
JournalDataOffset = reader.ReadInt64();
|
||||
JournalDataSizeA = reader.ReadInt64();
|
||||
JournalDataSizeB = reader.ReadInt64();
|
||||
SizeReservedArea = reader.ReadInt64();
|
||||
DuplexMasterOffsetA = reader.ReadInt64();
|
||||
DuplexMasterOffsetB = reader.ReadInt64();
|
||||
DuplexMasterSize = reader.ReadInt64();
|
||||
IvfcMasterHashOffsetA = reader.ReadInt64();
|
||||
IvfcMasterHashOffsetB = reader.ReadInt64();
|
||||
IvfcMasterHashSize = reader.ReadInt64();
|
||||
JournalTableOffset = reader.ReadInt64();
|
||||
JournalTableSize = reader.ReadInt64();
|
||||
JournalBitmapUpdatedPhysicalOffset = reader.ReadInt64();
|
||||
JournalBitmapUpdatedPhysicalSize = reader.ReadInt64();
|
||||
JournalBitmapUpdatedVirtualOffset = reader.ReadInt64();
|
||||
JournalBitmapUpdatedVirtualSize = reader.ReadInt64();
|
||||
JournalBitmapUnassignedOffset = reader.ReadInt64();
|
||||
JournalBitmapUnassignedSize = reader.ReadInt64();
|
||||
IvfcL1Offset = reader.ReadInt64();
|
||||
IvfcL1Size = reader.ReadInt64();
|
||||
IvfcL2Offset = reader.ReadInt64();
|
||||
IvfcL2Size = reader.ReadInt64();
|
||||
IvfcL3Offset = reader.ReadInt64();
|
||||
IvfcL3Size = reader.ReadInt64();
|
||||
FatOffset = reader.ReadInt64();
|
||||
FatSize = reader.ReadInt64();
|
||||
DuplexIndex = reader.ReadByte();
|
||||
}
|
||||
}
|
||||
|
||||
public class RemapHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint MagicNum { get; }
|
||||
public int MapEntryCount { get; }
|
||||
public int MapSegmentCount { get; }
|
||||
public int SegmentBits { get; }
|
||||
|
||||
public RemapHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
MagicNum = reader.ReadUInt32();
|
||||
MapEntryCount = reader.ReadInt32();
|
||||
MapSegmentCount = reader.ReadInt32();
|
||||
SegmentBits = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class DuplexHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint MagicNum { get; }
|
||||
public DuplexInfo[] Layers { get; } = new DuplexInfo[3];
|
||||
|
||||
public DuplexHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
MagicNum = reader.ReadUInt32();
|
||||
|
||||
for (int i = 0; i < Layers.Length; i++)
|
||||
{
|
||||
Layers[i] = new DuplexInfo(reader);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class DuplexInfo
|
||||
{
|
||||
public long Offset { get; }
|
||||
public long Length { get; set; }
|
||||
public int BlockSizePower { get; set; }
|
||||
public int BlockSize { get; set; }
|
||||
|
||||
public DuplexInfo() { }
|
||||
|
||||
public DuplexInfo(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
Length = reader.ReadInt64();
|
||||
BlockSizePower = reader.ReadInt32();
|
||||
BlockSize = 1 << BlockSizePower;
|
||||
}
|
||||
}
|
||||
|
||||
public class JournalHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint MagicNum { get; }
|
||||
public long TotalSize { get; }
|
||||
public long JournalSize { get; }
|
||||
public long BlockSize { get; }
|
||||
public int Field20 { get; }
|
||||
public int MainDataBlockCount { get; }
|
||||
public int JournalBlockCount { get; }
|
||||
public int Field2C { get; }
|
||||
|
||||
public JournalHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
MagicNum = reader.ReadUInt32();
|
||||
TotalSize = reader.ReadInt64();
|
||||
JournalSize = reader.ReadInt64();
|
||||
BlockSize = reader.ReadInt64();
|
||||
Field20 = reader.ReadInt32();
|
||||
MainDataBlockCount = reader.ReadInt32();
|
||||
JournalBlockCount = reader.ReadInt32();
|
||||
Field2C = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class SaveHeader
|
||||
{
|
||||
public string Magic { get; }
|
||||
public uint MagicNum { get; }
|
||||
public int Field8 { get; }
|
||||
public int FieldC { get; }
|
||||
public int Field10 { get; }
|
||||
public int Field14 { get; }
|
||||
public long BlockSize { get; }
|
||||
public StorageInfo AllocationTableInfo { get; }
|
||||
public StorageInfo DataInfo { get; }
|
||||
public int DirectoryTableBlock { get; }
|
||||
public int FileTableBlock { get; }
|
||||
|
||||
public SaveHeader(BinaryReader reader)
|
||||
{
|
||||
Magic = reader.ReadAscii(4);
|
||||
MagicNum = reader.ReadUInt32();
|
||||
Field8 = reader.ReadInt32();
|
||||
FieldC = reader.ReadInt32();
|
||||
Field10 = reader.ReadInt32();
|
||||
Field14 = reader.ReadInt32();
|
||||
BlockSize = reader.ReadInt64();
|
||||
AllocationTableInfo = new StorageInfo(reader);
|
||||
DataInfo = new StorageInfo(reader);
|
||||
DirectoryTableBlock = reader.ReadInt32();
|
||||
FileTableBlock = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class StorageInfo
|
||||
{
|
||||
public long Offset { get; }
|
||||
public int Size { get; }
|
||||
public int FieldC { get; }
|
||||
|
||||
public StorageInfo(BinaryReader reader)
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
Size = reader.ReadInt32();
|
||||
FieldC = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class MapEntry
|
||||
{
|
||||
public long VirtualOffset { get; }
|
||||
public long PhysicalOffset { get; }
|
||||
public long Size { get; }
|
||||
public int Alignment { get; }
|
||||
public int Field1C { get; }
|
||||
|
||||
public long VirtualOffsetEnd => VirtualOffset + Size;
|
||||
public long PhysicalOffsetEnd => PhysicalOffset + Size;
|
||||
internal RemapSegment Segment { get; set; }
|
||||
internal MapEntry Next { get; set; }
|
||||
|
||||
public MapEntry(BinaryReader reader)
|
||||
{
|
||||
VirtualOffset = reader.ReadInt64();
|
||||
PhysicalOffset = reader.ReadInt64();
|
||||
Size = reader.ReadInt64();
|
||||
Alignment = reader.ReadInt32();
|
||||
Field1C = reader.ReadInt32();
|
||||
}
|
||||
}
|
||||
|
||||
public class ExtraData
|
||||
{
|
||||
public ulong TitleId { get; }
|
||||
public Guid UserId { get; }
|
||||
public ulong SaveId { get; }
|
||||
public SaveDataType Type { get; }
|
||||
|
||||
public ulong SaveOwnerId { get; }
|
||||
public long Timestamp { get; }
|
||||
public long Field50 { get; }
|
||||
public uint Field54 { get; }
|
||||
public long DataSize { get; }
|
||||
public long JournalSize { get; }
|
||||
|
||||
public ExtraData(BinaryReader reader)
|
||||
{
|
||||
TitleId = reader.ReadUInt64();
|
||||
UserId = ToGuid(reader.ReadBytes(0x10));
|
||||
SaveId = reader.ReadUInt64();
|
||||
Type = (SaveDataType)reader.ReadByte();
|
||||
reader.BaseStream.Position += 0x1f;
|
||||
|
||||
SaveOwnerId = reader.ReadUInt64();
|
||||
Timestamp = reader.ReadInt64();
|
||||
Field50 = reader.ReadUInt32();
|
||||
Field54 = reader.ReadUInt32();
|
||||
DataSize = reader.ReadInt64();
|
||||
JournalSize = reader.ReadInt64();
|
||||
}
|
||||
|
||||
private static Guid ToGuid(byte[] bytes)
|
||||
{
|
||||
var b = new byte[0x10];
|
||||
Array.Copy(bytes, b, 0x10);
|
||||
|
||||
// The Guid constructor uses a weird, mixed-endian format
|
||||
Array.Reverse(b, 10, 6);
|
||||
|
||||
return new Guid(b);
|
||||
}
|
||||
}
|
||||
|
||||
public enum SaveDataType
|
||||
{
|
||||
SystemSaveData,
|
||||
SaveData,
|
||||
BcatDeliveryCacheStorage,
|
||||
DeviceSaveData,
|
||||
TemporaryStorage,
|
||||
CacheStorage
|
||||
}
|
||||
}
|
@ -1,136 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class JournalStream : Stream
|
||||
{
|
||||
private long _position;
|
||||
private Stream BaseStream { get; }
|
||||
public MappingEntry[] Map { get; }
|
||||
public int BlockSize { get; }
|
||||
private MappingEntry CurrentMapEntry { get; set; }
|
||||
|
||||
public JournalStream(Stream baseStream, MappingEntry[] map, int blockSize)
|
||||
{
|
||||
BaseStream = baseStream;
|
||||
Map = map;
|
||||
BlockSize = blockSize;
|
||||
Length = map.Length * BlockSize;
|
||||
|
||||
CurrentMapEntry = Map[0];
|
||||
BaseStream.Position = CurrentMapEntry.PhysicalIndex * BlockSize;
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Length - Position;
|
||||
if (remaining <= 0) return 0;
|
||||
if (remaining < count) count = (int)remaining;
|
||||
|
||||
int toOutput = count;
|
||||
int outPos = offset;
|
||||
|
||||
while (toOutput > 0)
|
||||
{
|
||||
long remainInEntry = BlockSize - Position % BlockSize;
|
||||
int toRead = (int)Math.Min(toOutput, remainInEntry);
|
||||
BaseStream.Read(buffer, outPos, toRead);
|
||||
|
||||
outPos += toRead;
|
||||
toOutput -= toRead;
|
||||
Position += toRead;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Math.Min(Length - Position, count);
|
||||
if (remaining <= 0) return;
|
||||
|
||||
int inPos = offset;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
long remainInEntry = BlockSize - Position % BlockSize;
|
||||
int toRead = (int)Math.Min(remaining, remainInEntry);
|
||||
BaseStream.Write(buffer, inPos, toRead);
|
||||
|
||||
inPos += toRead;
|
||||
remaining -= toRead;
|
||||
Position += toRead;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BaseStream.Flush();
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => true;
|
||||
public override bool CanWrite => true;
|
||||
public override long Length { get; }
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
_position = value;
|
||||
if (value >= Length) return;
|
||||
long currentBlock = value / BlockSize;
|
||||
long blockPos = value % BlockSize;
|
||||
CurrentMapEntry = Map[currentBlock];
|
||||
BaseStream.Position = CurrentMapEntry.PhysicalIndex * BlockSize + blockPos;
|
||||
}
|
||||
}
|
||||
|
||||
public static MappingEntry[] ReadMappingEntries(Stream mapTable, int count)
|
||||
{
|
||||
var tableReader = new BinaryReader(mapTable);
|
||||
var map = new MappingEntry[count];
|
||||
|
||||
for (int i = 0; i < count; i++)
|
||||
{
|
||||
var entry = new MappingEntry
|
||||
{
|
||||
VirtualIndex = i,
|
||||
PhysicalIndex = tableReader.ReadInt32() & 0x7FFFFFFF
|
||||
};
|
||||
|
||||
map[i] = entry;
|
||||
tableReader.BaseStream.Position += 4;
|
||||
}
|
||||
|
||||
return map;
|
||||
}
|
||||
}
|
||||
|
||||
public class MappingEntry
|
||||
{
|
||||
public int PhysicalIndex { get; set; }
|
||||
public int VirtualIndex { get; set; }
|
||||
}
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class LayeredDuplexFs : Stream
|
||||
{
|
||||
private DuplexFs[] Layers { get; }
|
||||
private DuplexFs DataLayer { get; }
|
||||
|
||||
public LayeredDuplexFs(DuplexFsLayerInfo[] layers, bool masterBit)
|
||||
{
|
||||
Layers = new DuplexFs[layers.Length - 1];
|
||||
|
||||
for (int i = 0; i < Layers.Length; i++)
|
||||
{
|
||||
Stream bitmap;
|
||||
|
||||
if (i == 0)
|
||||
{
|
||||
bitmap = masterBit ? layers[0].DataB : layers[0].DataA;
|
||||
}
|
||||
else
|
||||
{
|
||||
bitmap = Layers[i - 1];
|
||||
}
|
||||
|
||||
Layers[i] = new DuplexFs(bitmap, layers[i + 1].DataA, layers[i + 1].DataB, layers[i + 1].Info.BlockSize);
|
||||
}
|
||||
|
||||
DataLayer = Layers[Layers.Length - 1];
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
DataLayer.Flush();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
return DataLayer.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
return DataLayer.Seek(offset, origin);
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new System.NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
DataLayer.Write(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override bool CanRead => DataLayer.CanRead;
|
||||
public override bool CanSeek => DataLayer.CanSeek;
|
||||
public override bool CanWrite => DataLayer.CanWrite;
|
||||
public override long Length => DataLayer.Length;
|
||||
public override long Position
|
||||
{
|
||||
get => DataLayer.Position;
|
||||
set => DataLayer.Position = value;
|
||||
}
|
||||
}
|
||||
|
||||
public class DuplexFsLayerInfo
|
||||
{
|
||||
public Stream DataA { get; set; }
|
||||
public Stream DataB { get; set; }
|
||||
public DuplexInfo Info { get; set; }
|
||||
}
|
||||
}
|
@ -1,116 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class RemapStorage
|
||||
{
|
||||
private SharedStreamSource StreamSource { get; }
|
||||
private RemapHeader Header { get; }
|
||||
public MapEntry[] MapEntries { get; set; }
|
||||
public RemapSegment[] Segments { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new <see cref="RemapStorage"/>
|
||||
/// </summary>
|
||||
/// <param name="data">A <see cref="Stream"/> of the main data of the RemapStream.
|
||||
/// The <see cref="RemapStorage"/> object takes complete ownership of the Stream.</param>
|
||||
/// <param name="header">The header for this RemapStorage.</param>
|
||||
/// <param name="mapEntries">The remapping entries for this RemapStorage.</param>
|
||||
public RemapStorage(Stream data, RemapHeader header, MapEntry[] mapEntries)
|
||||
{
|
||||
StreamSource = new SharedStreamSource(data);
|
||||
Header = header;
|
||||
MapEntries = mapEntries;
|
||||
|
||||
Segments = InitSegments(Header, MapEntries);
|
||||
}
|
||||
|
||||
public Stream OpenStream(long offset, long size)
|
||||
{
|
||||
int segmentIdx = GetSegmentFromVirtualOffset(offset);
|
||||
long segmentOffset = GetOffsetFromVirtualOffset(offset);
|
||||
|
||||
if (segmentIdx > Segments.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
RemapSegment segment = Segments[GetSegmentFromVirtualOffset(offset)];
|
||||
|
||||
if (segmentOffset > segment.Length)
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
}
|
||||
|
||||
Stream stream = new RemapStream(StreamSource.CreateStream(), segment);
|
||||
|
||||
return new SubStream(stream, offset, size);
|
||||
}
|
||||
|
||||
public Stream OpenSegmentStream(int segment)
|
||||
{
|
||||
long offset = ToVirtualOffset(segment, 0);
|
||||
long size = Segments[segment].Length;
|
||||
|
||||
return OpenStream(offset, size);
|
||||
}
|
||||
|
||||
private static RemapSegment[] InitSegments(RemapHeader header, MapEntry[] mapEntries)
|
||||
{
|
||||
var segments = new RemapSegment[header.MapSegmentCount];
|
||||
int entryIdx = 0;
|
||||
|
||||
for (int i = 0; i < header.MapSegmentCount; i++)
|
||||
{
|
||||
var seg = new RemapSegment();
|
||||
seg.Entries.Add(mapEntries[entryIdx]);
|
||||
seg.Offset = mapEntries[entryIdx].VirtualOffset;
|
||||
mapEntries[entryIdx].Segment = seg;
|
||||
entryIdx++;
|
||||
|
||||
while (entryIdx < mapEntries.Length &&
|
||||
mapEntries[entryIdx - 1].VirtualOffsetEnd == mapEntries[entryIdx].VirtualOffset)
|
||||
{
|
||||
mapEntries[entryIdx].Segment = seg;
|
||||
mapEntries[entryIdx - 1].Next = mapEntries[entryIdx];
|
||||
seg.Entries.Add(mapEntries[entryIdx]);
|
||||
entryIdx++;
|
||||
}
|
||||
|
||||
seg.Length = seg.Entries[seg.Entries.Count - 1].VirtualOffsetEnd - seg.Entries[0].VirtualOffset;
|
||||
segments[i] = seg;
|
||||
}
|
||||
|
||||
return segments;
|
||||
}
|
||||
|
||||
private int GetSegmentFromVirtualOffset(long virtualOffset)
|
||||
{
|
||||
return (int)((ulong)virtualOffset >> (64 - Header.SegmentBits));
|
||||
}
|
||||
|
||||
private long GetOffsetFromVirtualOffset(long virtualOffset)
|
||||
{
|
||||
return virtualOffset & GetOffsetMask();
|
||||
}
|
||||
|
||||
private long ToVirtualOffset(int segment, long offset)
|
||||
{
|
||||
long seg = (segment << (64 - Header.SegmentBits)) & GetSegmentMask();
|
||||
long off = offset & GetOffsetMask();
|
||||
return seg | off;
|
||||
}
|
||||
|
||||
private long GetOffsetMask()
|
||||
{
|
||||
return (1 << (64 - Header.SegmentBits)) - 1;
|
||||
}
|
||||
|
||||
private long GetSegmentMask()
|
||||
{
|
||||
return ~GetOffsetMask();
|
||||
}
|
||||
}
|
||||
}
|
@ -1,142 +0,0 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class RemapStream : Stream
|
||||
{
|
||||
private long _position;
|
||||
private Stream BaseStream { get; }
|
||||
private RemapSegment Segment { get; }
|
||||
private MapEntry CurrentEntry { get; set; }
|
||||
|
||||
public RemapStream(Stream baseStream, RemapSegment segment)
|
||||
{
|
||||
BaseStream = baseStream;
|
||||
Segment = segment;
|
||||
CurrentEntry = segment.Entries[0];
|
||||
Length = segment.Length;
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (CurrentEntry == null) return 0;
|
||||
long remaining = CurrentEntry.Segment.Offset + CurrentEntry.Segment.Length - Position;
|
||||
if (remaining <= 0) return 0;
|
||||
if (remaining < count) count = (int)remaining;
|
||||
|
||||
int toOutput = count;
|
||||
int outPos = offset;
|
||||
|
||||
while (toOutput > 0)
|
||||
{
|
||||
long remainInEntry = CurrentEntry.VirtualOffsetEnd - Position;
|
||||
int toRead = (int)Math.Min(toOutput, remainInEntry);
|
||||
BaseStream.Read(buffer, outPos, toRead);
|
||||
outPos += toRead;
|
||||
toOutput -= toRead;
|
||||
Position += toRead;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
if (CurrentEntry == null) throw new EndOfStreamException();
|
||||
|
||||
long remaining = Math.Min(CurrentEntry.VirtualOffsetEnd - Position, count);
|
||||
if (remaining <= 0) return;
|
||||
|
||||
int inPos = offset;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
long remainInEntry = CurrentEntry.VirtualOffsetEnd - Position;
|
||||
int toWrite = (int)Math.Min(remaining, remainInEntry);
|
||||
BaseStream.Write(buffer, inPos, toWrite);
|
||||
|
||||
inPos += toWrite;
|
||||
remaining -= toWrite;
|
||||
Position += toWrite;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotSupportedException();
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
BaseStream.Flush();
|
||||
}
|
||||
|
||||
private MapEntry GetMapEntry(long offset)
|
||||
{
|
||||
MapEntry entry = Segment.Entries.FirstOrDefault(x => offset >= x.VirtualOffset && offset < x.VirtualOffsetEnd);
|
||||
if (entry == null) throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
return entry;
|
||||
}
|
||||
|
||||
private void UpdateBaseStreamPosition()
|
||||
{
|
||||
// At end of virtual stream
|
||||
if (CurrentEntry == null) return;
|
||||
long entryOffset = Position - CurrentEntry.VirtualOffset;
|
||||
BaseStream.Position = CurrentEntry.PhysicalOffset + entryOffset;
|
||||
}
|
||||
|
||||
public override bool CanRead => true;
|
||||
public override bool CanSeek => true;
|
||||
public override bool CanWrite => false;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
// Avoid doing a search when reading sequentially
|
||||
if (CurrentEntry != null && value == CurrentEntry.VirtualOffsetEnd)
|
||||
{
|
||||
CurrentEntry = CurrentEntry.Next;
|
||||
}
|
||||
else if (CurrentEntry == null || value < CurrentEntry.VirtualOffset || value > CurrentEntry.VirtualOffsetEnd)
|
||||
{
|
||||
CurrentEntry = GetMapEntry(value);
|
||||
}
|
||||
|
||||
_position = value;
|
||||
UpdateBaseStreamPosition();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class RemapSegment
|
||||
{
|
||||
public List<MapEntry> Entries { get; } = new List<MapEntry>();
|
||||
public long Offset { get; internal set; }
|
||||
public long Length { get; internal set; }
|
||||
}
|
||||
}
|
@ -1,200 +0,0 @@
|
||||
using System.IO;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.Save
|
||||
{
|
||||
public class Savefile
|
||||
{
|
||||
public Header Header { get; }
|
||||
public SharedStreamSource SavefileSource { get; }
|
||||
|
||||
public SharedStreamSource JournalStreamSource { get; }
|
||||
private HierarchicalIntegrityVerificationStream IvfcStream { get; }
|
||||
public SharedStreamSource IvfcStreamSource { get; }
|
||||
public SaveFs SaveFs { get; }
|
||||
|
||||
public RemapStorage DataRemapStorage { get; }
|
||||
public RemapStorage MetaRemapStorage { get; }
|
||||
|
||||
public LayeredDuplexFs DuplexData { get; }
|
||||
|
||||
public DirectoryEntry RootDirectory => SaveFs.RootDirectory;
|
||||
public FileEntry[] Files => SaveFs.Files;
|
||||
public DirectoryEntry[] Directories => SaveFs.Directories;
|
||||
|
||||
public Savefile(Keyset keyset, Stream file, IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
SavefileSource = new SharedStreamSource(file);
|
||||
|
||||
Header = new Header(keyset, SavefileSource);
|
||||
FsLayout layout = Header.Layout;
|
||||
|
||||
DataRemapStorage = new RemapStorage(SavefileSource.CreateStream(layout.FileMapDataOffset, layout.FileMapDataSize),
|
||||
Header.FileRemap, Header.FileMapEntries);
|
||||
|
||||
DuplexData = InitDuplexStream(DataRemapStorage, Header);
|
||||
|
||||
MetaRemapStorage = new RemapStorage(DuplexData, Header.MetaRemap, Header.MetaMapEntries);
|
||||
|
||||
Stream journalTable = MetaRemapStorage.OpenStream(layout.JournalTableOffset, layout.JournalTableSize);
|
||||
|
||||
MappingEntry[] journalMap = JournalStream.ReadMappingEntries(journalTable, Header.Journal.MainDataBlockCount);
|
||||
|
||||
Stream journalData = DataRemapStorage.OpenStream(layout.JournalDataOffset,
|
||||
layout.JournalDataSizeB + layout.SizeReservedArea);
|
||||
var journalStream = new JournalStream(journalData, journalMap, (int)Header.Journal.BlockSize);
|
||||
JournalStreamSource = new SharedStreamSource(journalStream);
|
||||
|
||||
IvfcStream = InitIvfcStream(integrityCheckLevel);
|
||||
|
||||
SaveFs = new SaveFs(IvfcStream, MetaRemapStorage.OpenStream(layout.FatOffset, layout.FatSize), Header.Save);
|
||||
|
||||
IvfcStreamSource = new SharedStreamSource(IvfcStream);
|
||||
}
|
||||
|
||||
private static LayeredDuplexFs InitDuplexStream(RemapStorage baseStorage, Header header)
|
||||
{
|
||||
FsLayout layout = header.Layout;
|
||||
var duplexLayers = new DuplexFsLayerInfo[3];
|
||||
|
||||
duplexLayers[0] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = new MemoryStream(header.DuplexMasterA),
|
||||
DataB = new MemoryStream(header.DuplexMasterB),
|
||||
Info = header.Duplex.Layers[0]
|
||||
};
|
||||
|
||||
duplexLayers[1] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = baseStorage.OpenStream(layout.DuplexL1OffsetA, layout.DuplexL1Size),
|
||||
DataB = baseStorage.OpenStream(layout.DuplexL1OffsetB, layout.DuplexL1Size),
|
||||
Info = header.Duplex.Layers[1]
|
||||
};
|
||||
|
||||
duplexLayers[2] = new DuplexFsLayerInfo
|
||||
{
|
||||
DataA = baseStorage.OpenStream(layout.DuplexDataOffsetA, layout.DuplexDataSize),
|
||||
DataB = baseStorage.OpenStream(layout.DuplexDataOffsetB, layout.DuplexDataSize),
|
||||
Info = header.Duplex.Layers[2]
|
||||
};
|
||||
|
||||
return new LayeredDuplexFs(duplexLayers, layout.DuplexIndex == 1);
|
||||
}
|
||||
|
||||
private HierarchicalIntegrityVerificationStream InitIvfcStream(IntegrityCheckLevel integrityCheckLevel)
|
||||
{
|
||||
IvfcHeader ivfc = Header.Ivfc;
|
||||
|
||||
const int ivfcLevels = 5;
|
||||
var initInfo = new IntegrityVerificationInfo[ivfcLevels];
|
||||
|
||||
initInfo[0] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = Header.MasterHash,
|
||||
BlockSize = 0,
|
||||
Type = IntegrityStreamType.Save
|
||||
};
|
||||
|
||||
for (int i = 1; i < ivfcLevels; i++)
|
||||
{
|
||||
IvfcLevelHeader level = ivfc.LevelHeaders[i - 1];
|
||||
|
||||
Stream data = i == ivfcLevels - 1
|
||||
? JournalStreamSource.CreateStream()
|
||||
: MetaRemapStorage.OpenStream(level.LogicalOffset, level.HashDataSize);
|
||||
|
||||
initInfo[i] = new IntegrityVerificationInfo
|
||||
{
|
||||
Data = data,
|
||||
BlockSize = 1 << level.BlockSizePower,
|
||||
Salt = new HMACSHA256(Encoding.ASCII.GetBytes(SaltSources[i - 1])).ComputeHash(ivfc.SaltSource),
|
||||
Type = IntegrityStreamType.Save
|
||||
};
|
||||
}
|
||||
|
||||
return new HierarchicalIntegrityVerificationStream(initInfo, integrityCheckLevel);
|
||||
}
|
||||
|
||||
public Stream OpenFile(string filename)
|
||||
{
|
||||
return SaveFs.OpenFile(filename);
|
||||
}
|
||||
|
||||
public Stream OpenFile(FileEntry file)
|
||||
{
|
||||
return SaveFs.OpenFile(file);
|
||||
}
|
||||
|
||||
public bool FileExists(string filename) => SaveFs.FileExists(filename);
|
||||
|
||||
public bool CommitHeader(Keyset keyset)
|
||||
{
|
||||
SharedStream headerStream = SavefileSource.CreateStream();
|
||||
|
||||
var hashData = new byte[0x3d00];
|
||||
|
||||
headerStream.Position = 0x300;
|
||||
headerStream.Read(hashData, 0, hashData.Length);
|
||||
|
||||
byte[] hash = Crypto.ComputeSha256(hashData, 0, hashData.Length);
|
||||
headerStream.Position = 0x108;
|
||||
headerStream.Write(hash, 0, hash.Length);
|
||||
|
||||
if (keyset.SaveMacKey.IsEmpty()) return false;
|
||||
|
||||
var cmacData = new byte[0x200];
|
||||
var cmac = new byte[0x10];
|
||||
|
||||
headerStream.Position = 0x100;
|
||||
headerStream.Read(cmacData, 0, 0x200);
|
||||
|
||||
Crypto.CalculateAesCmac(keyset.SaveMacKey, cmacData, 0, cmac, 0, 0x200);
|
||||
|
||||
headerStream.Position = 0;
|
||||
headerStream.Write(cmac, 0, 0x10);
|
||||
headerStream.Flush();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public Validity Verify(IProgressReport logger = null)
|
||||
{
|
||||
Validity validity = IvfcStream.Validate(true, logger);
|
||||
IvfcStream.SetLevelValidities(Header.Ivfc);
|
||||
|
||||
return validity;
|
||||
}
|
||||
|
||||
private string[] SaltSources =
|
||||
{
|
||||
"HierarchicalIntegrityVerificationStorage::Master",
|
||||
"HierarchicalIntegrityVerificationStorage::L1",
|
||||
"HierarchicalIntegrityVerificationStorage::L2",
|
||||
"HierarchicalIntegrityVerificationStorage::L3",
|
||||
"HierarchicalIntegrityVerificationStorage::L4",
|
||||
"HierarchicalIntegrityVerificationStorage::L5"
|
||||
};
|
||||
}
|
||||
|
||||
public static class SavefileExtensions
|
||||
{
|
||||
public static void Extract(this Savefile save, string outDir, IProgressReport logger = null)
|
||||
{
|
||||
foreach (FileEntry file in save.Files)
|
||||
{
|
||||
Stream stream = save.OpenFile(file);
|
||||
string outName = outDir + file.FullPath;
|
||||
string dir = Path.GetDirectoryName(outName);
|
||||
if (!string.IsNullOrWhiteSpace(dir)) Directory.CreateDirectory(dir);
|
||||
|
||||
using (var outFile = new FileStream(outName, FileMode.Create, FileAccess.ReadWrite))
|
||||
{
|
||||
logger?.LogMessage(file.FullPath);
|
||||
stream.CopyStream(outFile, stream.Length, logger);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,353 +0,0 @@
|
||||
//-----------------------------------------------------------------------
|
||||
// <copyright file="CombinationStream.cs" company="The Outercurve Foundation">
|
||||
// Copyright (c) 2011, The Outercurve Foundation.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// </copyright>
|
||||
// <author>Prabir Shrestha (prabir.me)</author>
|
||||
// <website>https://github.com/facebook-csharp-sdk/combination-stream</website>
|
||||
//-----------------------------------------------------------------------
|
||||
|
||||
/*
|
||||
* Install-Package CombinationStream
|
||||
*
|
||||
*/
|
||||
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Threading;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class CombinationStream : Stream
|
||||
{
|
||||
private readonly IList<Stream> _streams;
|
||||
private readonly IList<int> _streamsToDispose;
|
||||
private readonly IList<long> _streamsStartPos;
|
||||
private int _currentStreamIndex;
|
||||
private Stream _currentStream;
|
||||
private long _length = -1;
|
||||
private long _postion;
|
||||
|
||||
public CombinationStream(IList<Stream> streams)
|
||||
: this(streams, null)
|
||||
{
|
||||
}
|
||||
|
||||
public CombinationStream(IList<Stream> streams, IList<int> streamsToDispose)
|
||||
{
|
||||
if (streams == null)
|
||||
throw new ArgumentNullException("streams");
|
||||
|
||||
_streams = streams;
|
||||
_streamsToDispose = streamsToDispose;
|
||||
if (streams.Count > 0)
|
||||
_currentStream = streams[_currentStreamIndex++];
|
||||
|
||||
_streamsStartPos = new List<long>(streams.Count);
|
||||
long pos = 0;
|
||||
foreach (Stream strm in streams)
|
||||
{
|
||||
_streamsStartPos.Add(pos);
|
||||
pos += strm.Length;
|
||||
}
|
||||
}
|
||||
|
||||
public IList<Stream> InternalStreams => _streams;
|
||||
|
||||
public override void Flush() => _currentStream?.Flush();
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
long pos = 0;
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
pos = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
pos = Position + offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
pos = Length + offset;
|
||||
break;
|
||||
}
|
||||
int idx = 0;
|
||||
while (idx + 1 < _streamsStartPos.Count)
|
||||
{
|
||||
if (_streamsStartPos[idx + 1] > pos)
|
||||
{
|
||||
break;
|
||||
}
|
||||
idx++;
|
||||
}
|
||||
|
||||
_currentStreamIndex = idx;
|
||||
_currentStream = _streams[_currentStreamIndex];
|
||||
_currentStream.Seek(pos - _streamsStartPos[idx], SeekOrigin.Begin);
|
||||
_postion = pos;
|
||||
return _postion;
|
||||
//throw new InvalidOperationException("Stream is not seekable.");
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
_length = value;
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int result = 0;
|
||||
int buffPostion = offset;
|
||||
|
||||
while (count > 0)
|
||||
{
|
||||
int bytesRead = _currentStream.Read(buffer, buffPostion, count);
|
||||
result += bytesRead;
|
||||
buffPostion += bytesRead;
|
||||
_postion += bytesRead;
|
||||
|
||||
if (bytesRead <= count)
|
||||
count -= bytesRead;
|
||||
|
||||
if (count > 0)
|
||||
{
|
||||
if (_currentStreamIndex + 1 >= _streams.Count)
|
||||
break;
|
||||
|
||||
_currentStream = _streams[_currentStreamIndex + 1];
|
||||
_currentStreamIndex++;
|
||||
_currentStream.Position = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#if NETFX_CORE
|
||||
|
||||
public async override System.Threading.Tasks.Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
|
||||
{
|
||||
int result = 0;
|
||||
int buffPostion = offset;
|
||||
|
||||
while (count > 0)
|
||||
{
|
||||
int bytesRead = await _currentStream.ReadAsync(buffer, buffPostion, count, cancellationToken);
|
||||
result += bytesRead;
|
||||
buffPostion += bytesRead;
|
||||
_postion += bytesRead;
|
||||
|
||||
if (bytesRead <= count)
|
||||
count -= bytesRead;
|
||||
|
||||
if (count > 0)
|
||||
{
|
||||
if (_currentStreamIndex >= _streams.Count)
|
||||
break;
|
||||
|
||||
_currentStream = _streams[_currentStreamIndex++];
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
public override System.Threading.Tasks.Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
|
||||
{
|
||||
throw new InvalidOperationException("Stream is not writable");
|
||||
}
|
||||
|
||||
#else
|
||||
public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state)
|
||||
{
|
||||
var asyncResult = new CombinationStreamAsyncResult(state);
|
||||
if (count > 0)
|
||||
{
|
||||
int buffPostion = offset;
|
||||
|
||||
AsyncCallback rc = null;
|
||||
rc = readresult =>
|
||||
{
|
||||
try
|
||||
{
|
||||
int bytesRead = _currentStream.EndRead(readresult);
|
||||
asyncResult.BytesRead += bytesRead;
|
||||
buffPostion += bytesRead;
|
||||
_postion += bytesRead;
|
||||
|
||||
if (bytesRead <= count)
|
||||
count -= bytesRead;
|
||||
|
||||
if (count > 0)
|
||||
{
|
||||
if (_currentStreamIndex >= _streams.Count)
|
||||
{
|
||||
// done
|
||||
asyncResult.CompletedSynchronously = false;
|
||||
asyncResult.SetAsyncWaitHandle();
|
||||
asyncResult.IsCompleted = true;
|
||||
callback(asyncResult);
|
||||
}
|
||||
else
|
||||
{
|
||||
_currentStream = _streams[_currentStreamIndex++];
|
||||
_currentStream.BeginRead(buffer, buffPostion, count, rc, readresult.AsyncState);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// done
|
||||
asyncResult.CompletedSynchronously = false;
|
||||
asyncResult.SetAsyncWaitHandle();
|
||||
asyncResult.IsCompleted = true;
|
||||
callback(asyncResult);
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
// done
|
||||
asyncResult.Exception = ex;
|
||||
asyncResult.CompletedSynchronously = false;
|
||||
asyncResult.SetAsyncWaitHandle();
|
||||
asyncResult.IsCompleted = true;
|
||||
callback(asyncResult);
|
||||
}
|
||||
};
|
||||
_currentStream.BeginRead(buffer, buffPostion, count, rc, state);
|
||||
}
|
||||
else
|
||||
{
|
||||
// done
|
||||
asyncResult.CompletedSynchronously = true;
|
||||
asyncResult.SetAsyncWaitHandle();
|
||||
asyncResult.IsCompleted = true;
|
||||
callback(asyncResult);
|
||||
}
|
||||
|
||||
return asyncResult;
|
||||
}
|
||||
|
||||
public override int EndRead(IAsyncResult asyncResult)
|
||||
{
|
||||
// todo: check if it is of same reference
|
||||
asyncResult.AsyncWaitHandle.WaitOne();
|
||||
var ar = (CombinationStreamAsyncResult)asyncResult;
|
||||
if (ar.Exception != null)
|
||||
{
|
||||
throw ar.Exception;
|
||||
}
|
||||
|
||||
return ar.BytesRead;
|
||||
}
|
||||
|
||||
public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state)
|
||||
{
|
||||
throw new InvalidOperationException("Stream is not writable");
|
||||
}
|
||||
|
||||
internal class CombinationStreamAsyncResult : IAsyncResult
|
||||
{
|
||||
private readonly object _asyncState;
|
||||
|
||||
public CombinationStreamAsyncResult(object asyncState)
|
||||
{
|
||||
_asyncState = asyncState;
|
||||
_manualResetEvent = new ManualResetEvent(false);
|
||||
}
|
||||
|
||||
public bool IsCompleted { get; internal set; }
|
||||
|
||||
public WaitHandle AsyncWaitHandle
|
||||
{
|
||||
get { return _manualResetEvent; }
|
||||
}
|
||||
|
||||
public object AsyncState
|
||||
{
|
||||
get { return _asyncState; }
|
||||
}
|
||||
|
||||
public bool CompletedSynchronously { get; internal set; }
|
||||
|
||||
public Exception Exception { get; internal set; }
|
||||
|
||||
internal void SetAsyncWaitHandle()
|
||||
{
|
||||
_manualResetEvent.Set();
|
||||
}
|
||||
|
||||
private readonly ManualResetEvent _manualResetEvent;
|
||||
public int BytesRead;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
throw new InvalidOperationException("Stream is not writable");
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
base.Dispose(disposing);
|
||||
if (_streamsToDispose == null)
|
||||
{
|
||||
foreach (Stream stream in InternalStreams)
|
||||
stream.Dispose();
|
||||
}
|
||||
else
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < InternalStreams.Count; i++)
|
||||
InternalStreams[i].Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead
|
||||
{
|
||||
get { return true; }
|
||||
}
|
||||
|
||||
public override bool CanSeek
|
||||
{
|
||||
get { return true; }
|
||||
}
|
||||
|
||||
public override bool CanWrite
|
||||
{
|
||||
get { return false; }
|
||||
}
|
||||
|
||||
public override long Length
|
||||
{
|
||||
get
|
||||
{
|
||||
if (_length == -1)
|
||||
{
|
||||
_length = 0;
|
||||
foreach (Stream stream in _streams)
|
||||
_length += stream.Length;
|
||||
}
|
||||
|
||||
return _length;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _postion;
|
||||
set => Seek(value, SeekOrigin.Begin);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,172 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class RandomAccessSectorStream : Stream
|
||||
{
|
||||
private readonly byte[] _buffer;
|
||||
private readonly int _bufferSize;
|
||||
private readonly SectorStream _baseStream;
|
||||
private readonly bool _keepOpen;
|
||||
private int _readBytes; // Number of bytes read into buffer
|
||||
private int _bufferPos;
|
||||
private long _currentSector;
|
||||
private bool _bufferDirty;
|
||||
|
||||
public RandomAccessSectorStream(SectorStream baseStream)
|
||||
: this(baseStream, true) { }
|
||||
|
||||
public RandomAccessSectorStream(SectorStream baseStream, bool keepOpen)
|
||||
{
|
||||
_baseStream = baseStream;
|
||||
_keepOpen = keepOpen;
|
||||
_bufferSize = baseStream.SectorSize * baseStream.MaxSectors;
|
||||
_buffer = new byte[_bufferSize];
|
||||
}
|
||||
|
||||
private void FillBuffer(long sectorNum)
|
||||
{
|
||||
WriteSectorIfDirty();
|
||||
|
||||
_currentSector = sectorNum;
|
||||
long startPos = sectorNum * _bufferSize;
|
||||
if (_baseStream.Position != startPos)
|
||||
{
|
||||
_baseStream.Position = startPos;
|
||||
}
|
||||
|
||||
_readBytes = _baseStream.Read(_buffer, 0, _bufferSize);
|
||||
}
|
||||
|
||||
private void WriteSectorIfDirty()
|
||||
{
|
||||
if (_readBytes == 0 || !_bufferDirty) return;
|
||||
|
||||
_baseStream.Position = _currentSector * _bufferSize;
|
||||
_baseStream.Write(_buffer, 0, _bufferSize);
|
||||
|
||||
_bufferPos = 0;
|
||||
_readBytes = 0;
|
||||
_bufferDirty = false;
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
WriteSectorIfDirty();
|
||||
_baseStream.Flush();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Math.Min(count, Length - Position);
|
||||
if (remaining <= 0) return 0;
|
||||
|
||||
if (_readBytes == 0) FillBuffer(Position / _bufferSize);
|
||||
int outOffset = offset;
|
||||
int totalBytesRead = 0;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
int bytesToRead = (int)Math.Min(remaining, _bufferSize - _bufferPos);
|
||||
|
||||
Buffer.BlockCopy(_buffer, _bufferPos, buffer, outOffset, bytesToRead);
|
||||
|
||||
outOffset += bytesToRead;
|
||||
_bufferPos += bytesToRead;
|
||||
totalBytesRead += bytesToRead;
|
||||
remaining -= bytesToRead;
|
||||
|
||||
if (_bufferPos == _bufferSize)
|
||||
{
|
||||
FillBuffer(_currentSector + 1);
|
||||
_bufferPos = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return totalBytesRead;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
int remaining = count;
|
||||
int outOffset = offset;
|
||||
|
||||
while (remaining > 0)
|
||||
{
|
||||
if (_readBytes == 0) FillBuffer(Position / _bufferSize);
|
||||
|
||||
int bytesToWrite = Math.Min(remaining, _readBytes - _bufferPos);
|
||||
|
||||
Buffer.BlockCopy(buffer, outOffset, _buffer, _bufferPos, bytesToWrite);
|
||||
|
||||
outOffset += bytesToWrite;
|
||||
_bufferPos += bytesToWrite;
|
||||
remaining -= bytesToWrite;
|
||||
_bufferDirty = true;
|
||||
|
||||
if (_bufferPos == _bufferSize)
|
||||
{
|
||||
WriteSectorIfDirty();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public override bool CanRead => _baseStream.CanRead;
|
||||
public override bool CanSeek => _baseStream.CanSeek;
|
||||
public override bool CanWrite => _baseStream.CanWrite;
|
||||
public override long Length => _baseStream.Length;
|
||||
public override long Position
|
||||
{
|
||||
get => _baseStream.Position - _readBytes + _bufferPos;
|
||||
set
|
||||
{
|
||||
if (value < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(value));
|
||||
|
||||
long sectorNum = value / _bufferSize;
|
||||
|
||||
if (sectorNum != _currentSector)
|
||||
{
|
||||
FillBuffer(sectorNum);
|
||||
}
|
||||
|
||||
_bufferPos = (int)(value % _bufferSize);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
Flush();
|
||||
|
||||
base.Dispose(disposing);
|
||||
|
||||
if (!_keepOpen)
|
||||
_baseStream.Dispose();
|
||||
}
|
||||
}
|
||||
}
|
@ -1,168 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class SectorStream : Stream
|
||||
{
|
||||
private readonly Stream _baseStream;
|
||||
private readonly long _offset;
|
||||
private readonly int _maxBufferSize;
|
||||
private readonly bool _keepOpen;
|
||||
|
||||
/// <summary>
|
||||
/// The size of the sectors.
|
||||
/// </summary>
|
||||
public int SectorSize { get; }
|
||||
|
||||
/// <summary>
|
||||
/// The number of sectors in the stream.
|
||||
/// </summary>
|
||||
public int SectorCount { get; }
|
||||
|
||||
/// <summary>
|
||||
/// The maximum number of sectors that can be read or written in a single operation.
|
||||
/// </summary>
|
||||
public int MaxSectors { get; }
|
||||
|
||||
/// <summary>
|
||||
/// The current sector this stream is at
|
||||
/// </summary>
|
||||
protected long CurrentSector { get; private set; }
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream to read/write from</param>
|
||||
/// <param name="sectorSize">The size of the sectors to read/write</param>
|
||||
public SectorStream(Stream baseStream, int sectorSize)
|
||||
: this(baseStream, sectorSize, 1, 0)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream to read/write from</param>
|
||||
/// <param name="sectorSize">The size of the sectors to read/write</param>
|
||||
/// <param name="maxSectors">The maximum number of sectors to read/write at once</param>
|
||||
/// <param name="offset">Offset to start counting sectors</param>
|
||||
public SectorStream(Stream baseStream, int sectorSize, int maxSectors, long offset)
|
||||
: this(baseStream, sectorSize, maxSectors, offset, false)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream to read/write from</param>
|
||||
/// <param name="sectorSize">The size of the sectors to read/write</param>
|
||||
/// <param name="maxSectors">The maximum number of sectors to read/write at once</param>
|
||||
/// <param name="offset">Offset to start counting sectors</param>
|
||||
/// <param name="keepOpen">Should this stream leave the base stream open when disposed?</param>
|
||||
public SectorStream(Stream baseStream, int sectorSize, int maxSectors, long offset, bool keepOpen)
|
||||
{
|
||||
SectorSize = sectorSize;
|
||||
_baseStream = baseStream;
|
||||
MaxSectors = maxSectors;
|
||||
_offset = offset;
|
||||
_keepOpen = keepOpen;
|
||||
_maxBufferSize = MaxSectors * SectorSize;
|
||||
baseStream.Position = offset;
|
||||
|
||||
SectorCount = (int)Util.DivideByRoundUp(_baseStream.Length - _offset, sectorSize);
|
||||
}
|
||||
|
||||
public override void Flush()
|
||||
{
|
||||
_baseStream.Flush();
|
||||
}
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
ValidateSize(count);
|
||||
int bytesRead = _baseStream.Read(buffer, offset, count);
|
||||
CurrentSector += bytesRead / SectorSize;
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value)
|
||||
{
|
||||
throw new NotImplementedException();
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
ValidateSize(count);
|
||||
int toWrite = (int)Math.Min(count, Length - Position);
|
||||
|
||||
_baseStream.Write(buffer, offset, toWrite);
|
||||
CurrentSector += count / SectorSize;
|
||||
}
|
||||
|
||||
public override bool CanRead => _baseStream.CanRead;
|
||||
public override bool CanSeek => _baseStream.CanSeek;
|
||||
public override bool CanWrite => _baseStream.CanWrite;
|
||||
public override long Length => _baseStream.Length - _offset;
|
||||
public override long Position
|
||||
{
|
||||
get => _baseStream.Position - _offset;
|
||||
set
|
||||
{
|
||||
ValidateSizeMultiple(value);
|
||||
_baseStream.Position = value + _offset;
|
||||
CurrentSector = value / SectorSize;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Validates that the size is a multiple of the sector size and smaller than the max buffer size
|
||||
/// </summary>
|
||||
protected void ValidateSize(long value)
|
||||
{
|
||||
ValidateSizeMultiple(value);
|
||||
|
||||
if (value > _maxBufferSize)
|
||||
throw new ArgumentException($"Value cannot be greater than {_maxBufferSize}");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Validates that the size is a multiple of the sector size
|
||||
/// </summary>
|
||||
protected void ValidateSizeMultiple(long value)
|
||||
{
|
||||
if (value < 0)
|
||||
throw new ArgumentException("Value must be non-negative");
|
||||
if (value % SectorSize != 0)
|
||||
throw new ArgumentException($"Value must be a multiple of {SectorSize}");
|
||||
}
|
||||
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
if (!_keepOpen)
|
||||
{
|
||||
_baseStream.Dispose();
|
||||
}
|
||||
|
||||
base.Dispose(disposing);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class SharedStream : Stream
|
||||
{
|
||||
private readonly SharedStreamSource _stream;
|
||||
private readonly long _offset;
|
||||
private long _position;
|
||||
|
||||
public SharedStream(SharedStreamSource source, long offset, long length)
|
||||
{
|
||||
_stream = source;
|
||||
_offset = offset;
|
||||
Length = length;
|
||||
}
|
||||
|
||||
public override void Flush() => _stream.Flush();
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Length - Position;
|
||||
if (remaining <= 0) return 0;
|
||||
if (remaining < count) count = (int)remaining;
|
||||
|
||||
int bytesRead = _stream.Read(_offset + _position, buffer, offset, count);
|
||||
_position += bytesRead;
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value) => throw new NotImplementedException();
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
_stream.Write(_offset + _position, buffer, offset, count);
|
||||
_position += count;
|
||||
}
|
||||
|
||||
public override bool CanRead => _stream.CanRead;
|
||||
public override bool CanSeek => _stream.CanSeek;
|
||||
public override bool CanWrite => _stream.CanWrite;
|
||||
public override long Length { get; }
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => _position;
|
||||
set
|
||||
{
|
||||
if (value < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(value));
|
||||
|
||||
_position = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class SharedStreamSource : IDisposable
|
||||
{
|
||||
private Stream BaseStream { get; }
|
||||
private object Locker { get; } = new object();
|
||||
private bool KeepOpen { get; }
|
||||
|
||||
public SharedStreamSource(Stream baseStream) : this(baseStream, true) { }
|
||||
|
||||
public SharedStreamSource(Stream baseStream, bool keepOpen)
|
||||
{
|
||||
BaseStream = baseStream;
|
||||
KeepOpen = keepOpen;
|
||||
}
|
||||
|
||||
public SharedStream CreateStream()
|
||||
{
|
||||
return CreateStream(0);
|
||||
}
|
||||
|
||||
public SharedStream CreateStream(long offset)
|
||||
{
|
||||
return CreateStream(offset, BaseStream.Length - offset);
|
||||
}
|
||||
|
||||
public SharedStream CreateStream(long offset, long length)
|
||||
{
|
||||
return new SharedStream(this, offset, length);
|
||||
}
|
||||
|
||||
public void Flush()
|
||||
{
|
||||
lock (Locker)
|
||||
{
|
||||
BaseStream.Flush();
|
||||
}
|
||||
}
|
||||
|
||||
public int Read(long readOffset, byte[] buffer, int bufferOffset, int count)
|
||||
{
|
||||
lock (Locker)
|
||||
{
|
||||
if (BaseStream.Position != readOffset)
|
||||
{
|
||||
BaseStream.Position = readOffset;
|
||||
}
|
||||
|
||||
return BaseStream.Read(buffer, bufferOffset, count);
|
||||
}
|
||||
}
|
||||
|
||||
public void Write(long writeOffset, byte[] buffer, int bufferOffset, int count)
|
||||
{
|
||||
lock (Locker)
|
||||
{
|
||||
if (BaseStream.Position != writeOffset)
|
||||
{
|
||||
BaseStream.Position = writeOffset;
|
||||
}
|
||||
|
||||
BaseStream.Write(buffer, bufferOffset, count);
|
||||
}
|
||||
}
|
||||
|
||||
public bool CanRead => BaseStream.CanRead;
|
||||
public bool CanSeek => BaseStream.CanSeek;
|
||||
public bool CanWrite => BaseStream.CanWrite;
|
||||
public long Length => BaseStream.Length;
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (KeepOpen)
|
||||
{
|
||||
BaseStream?.Dispose();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
namespace LibHac.Streams
|
||||
{
|
||||
public class SubStream : Stream
|
||||
{
|
||||
private Stream BaseStream { get; }
|
||||
private long Offset { get; }
|
||||
|
||||
public SubStream(Stream baseStream, long offset, long length)
|
||||
{
|
||||
if (baseStream == null) throw new ArgumentNullException(nameof(baseStream));
|
||||
if (offset < 0) throw new ArgumentOutOfRangeException(nameof(offset));
|
||||
if (!baseStream.CanSeek || !baseStream.CanRead) throw new NotSupportedException();
|
||||
|
||||
BaseStream = baseStream;
|
||||
Length = length;
|
||||
Offset = offset;
|
||||
|
||||
baseStream.Seek(offset, SeekOrigin.Begin);
|
||||
}
|
||||
|
||||
public SubStream(Stream baseStream, long offset)
|
||||
: this(baseStream, offset, baseStream.Length - offset) { }
|
||||
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Length - Position;
|
||||
if (remaining <= 0) return 0;
|
||||
if (remaining < count) count = (int)remaining;
|
||||
return BaseStream.Read(buffer, offset, count);
|
||||
}
|
||||
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
long remaining = Math.Min(Length - Position, count);
|
||||
if (remaining <= 0) return;
|
||||
|
||||
BaseStream.Write(buffer, offset, (int)remaining);
|
||||
}
|
||||
|
||||
public override long Length { get; }
|
||||
public override bool CanRead => BaseStream.CanRead;
|
||||
public override bool CanWrite => BaseStream.CanWrite;
|
||||
public override bool CanSeek => BaseStream.CanSeek;
|
||||
|
||||
public override long Position
|
||||
{
|
||||
get => BaseStream.Position - Offset;
|
||||
set
|
||||
{
|
||||
if (value < 0)
|
||||
throw new ArgumentOutOfRangeException(nameof(value));
|
||||
|
||||
BaseStream.Position = Offset + value;
|
||||
}
|
||||
}
|
||||
|
||||
public override long Seek(long offset, SeekOrigin origin)
|
||||
{
|
||||
switch (origin)
|
||||
{
|
||||
case SeekOrigin.Begin:
|
||||
Position = offset;
|
||||
break;
|
||||
case SeekOrigin.Current:
|
||||
Position += offset;
|
||||
break;
|
||||
case SeekOrigin.End:
|
||||
Position = Length - offset;
|
||||
break;
|
||||
}
|
||||
|
||||
return Position;
|
||||
}
|
||||
|
||||
public override void SetLength(long value) => throw new NotSupportedException();
|
||||
|
||||
public override void Flush() => BaseStream.Flush();
|
||||
}
|
||||
}
|
@ -4,8 +4,8 @@ using System.Diagnostics;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using LibHac.Save;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
using LibHac.IO.Save;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -66,25 +66,25 @@ namespace LibHac
|
||||
try
|
||||
{
|
||||
bool isNax0;
|
||||
Stream stream = OpenSplitNcaStream(Fs, file);
|
||||
if (stream == null) continue;
|
||||
IStorage storage = OpenSplitNcaStream(Fs, file);
|
||||
if (storage == null) continue;
|
||||
|
||||
using (var reader = new BinaryReader(stream, Encoding.Default, true))
|
||||
using (var reader = new BinaryReader(storage.AsStream(), Encoding.Default, true))
|
||||
{
|
||||
stream.Position = 0x20;
|
||||
reader.BaseStream.Position = 0x20;
|
||||
isNax0 = reader.ReadUInt32() == 0x3058414E; // NAX0
|
||||
stream.Position = 0;
|
||||
reader.BaseStream.Position = 0;
|
||||
}
|
||||
|
||||
if (isNax0)
|
||||
{
|
||||
string sdPath = "/" + Util.GetRelativePath(file, ContentsDir).Replace('\\', '/');
|
||||
var nax0 = new Nax0(Keyset, stream, sdPath, false);
|
||||
nca = new Nca(Keyset, nax0.Stream, false);
|
||||
var nax0 = new Nax0(Keyset, storage, sdPath, false);
|
||||
nca = new Nca(Keyset, nax0.BaseStorage, false);
|
||||
}
|
||||
else
|
||||
{
|
||||
nca = new Nca(Keyset, stream, false);
|
||||
nca = new Nca(Keyset, storage, false);
|
||||
}
|
||||
|
||||
nca.NcaId = Path.GetFileNameWithoutExtension(file);
|
||||
@ -123,11 +123,11 @@ namespace LibHac
|
||||
|
||||
try
|
||||
{
|
||||
Stream stream = Fs.OpenFile(file, FileMode.Open);
|
||||
IStorage storage = Fs.OpenFile(file, FileMode.Open).AsStorage();
|
||||
|
||||
string sdPath = "/" + Util.GetRelativePath(file, SaveDir).Replace('\\', '/');
|
||||
var nax0 = new Nax0(Keyset, stream, sdPath, false);
|
||||
save = new Savefile(Keyset, nax0.Stream, IntegrityCheckLevel.None);
|
||||
var nax0 = new Nax0(Keyset, storage, sdPath, false);
|
||||
save = new Savefile(Keyset, nax0.BaseStorage, IntegrityCheckLevel.None, true);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
@ -148,11 +148,11 @@ namespace LibHac
|
||||
var title = new Title();
|
||||
|
||||
// Meta contents always have 1 Partition FS section with 1 file in it
|
||||
Stream sect = nca.OpenSection(0, false, IntegrityCheckLevel.ErrorOnInvalid);
|
||||
IStorage sect = nca.OpenSection(0, false, IntegrityCheckLevel.ErrorOnInvalid, true);
|
||||
var pfs0 = new Pfs(sect);
|
||||
Stream file = pfs0.OpenFile(pfs0.Files[0]);
|
||||
IStorage file = pfs0.OpenFile(pfs0.Files[0]);
|
||||
|
||||
var metadata = new Cnmt(file);
|
||||
var metadata = new Cnmt(file.AsStream());
|
||||
title.Id = metadata.TitleId;
|
||||
title.Version = metadata.TitleVersion;
|
||||
title.Metadata = metadata;
|
||||
@ -188,10 +188,10 @@ namespace LibHac
|
||||
{
|
||||
foreach (Title title in Titles.Values.Where(x => x.ControlNca != null))
|
||||
{
|
||||
var romfs = new Romfs(title.ControlNca.OpenSection(0, false, IntegrityCheckLevel.ErrorOnInvalid));
|
||||
Stream control = romfs.OpenFile("/control.nacp");
|
||||
var romfs = new Romfs(title.ControlNca.OpenSection(0, false, IntegrityCheckLevel.ErrorOnInvalid, true));
|
||||
IStorage control = romfs.OpenFile("/control.nacp");
|
||||
|
||||
title.Control = new Nacp(control);
|
||||
title.Control = new Nacp(control.AsStream());
|
||||
|
||||
foreach (NacpDescription desc in title.Control.Descriptions)
|
||||
{
|
||||
@ -232,10 +232,10 @@ namespace LibHac
|
||||
}
|
||||
}
|
||||
|
||||
internal static Stream OpenSplitNcaStream(IFileSystem fs, string path)
|
||||
internal static IStorage OpenSplitNcaStream(IFileSystem fs, string path)
|
||||
{
|
||||
var files = new List<string>();
|
||||
var streams = new List<Stream>();
|
||||
var storages = new List<IStorage>();
|
||||
|
||||
if (fs.DirectoryExists(path))
|
||||
{
|
||||
@ -251,7 +251,7 @@ namespace LibHac
|
||||
{
|
||||
if (Path.GetFileName(path) != "00")
|
||||
{
|
||||
return fs.OpenFile(path, FileMode.Open, FileAccess.Read);
|
||||
return fs.OpenFile(path, FileMode.Open, FileAccess.Read).AsStorage();
|
||||
}
|
||||
files.Add(path);
|
||||
}
|
||||
@ -260,15 +260,19 @@ namespace LibHac
|
||||
throw new FileNotFoundException("Could not find the input file or directory");
|
||||
}
|
||||
|
||||
foreach (string file in files)
|
||||
if (files.Count == 1)
|
||||
{
|
||||
streams.Add(fs.OpenFile(file, FileMode.Open, FileAccess.Read));
|
||||
return fs.OpenFile(files[0], FileMode.Open, FileAccess.Read).AsStorage();
|
||||
}
|
||||
|
||||
if (streams.Count == 0) return null;
|
||||
foreach (string file in files)
|
||||
{
|
||||
storages.Add(fs.OpenFile(file, FileMode.Open, FileAccess.Read).AsStorage());
|
||||
}
|
||||
|
||||
var stream = new CombinationStream(streams);
|
||||
return stream;
|
||||
if (storages.Count == 0) return null; //todo
|
||||
|
||||
return new ConcatenationStorage(storages, true);
|
||||
}
|
||||
|
||||
private void DisposeNcas()
|
||||
|
@ -1,6 +1,8 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Numerics;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Text;
|
||||
|
||||
namespace LibHac
|
||||
@ -46,13 +48,14 @@ namespace LibHac
|
||||
return true;
|
||||
}
|
||||
|
||||
public static bool IsEmpty(this byte[] array)
|
||||
public static bool SpansEqual<T>(Span<T> a1, Span<T> a2)
|
||||
{
|
||||
if (array == null) throw new ArgumentNullException(nameof(array));
|
||||
if (a1 == a2) return true;
|
||||
if (a1.Length != a2.Length) return false;
|
||||
|
||||
for (int i = 0; i < array.Length; i++)
|
||||
for (int i = 0; i < a1.Length; i++)
|
||||
{
|
||||
if (array[i] != 0)
|
||||
if (!a1[i].Equals(a2[i]))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@ -61,6 +64,44 @@ namespace LibHac
|
||||
return true;
|
||||
}
|
||||
|
||||
public static bool IsEmpty(this byte[] array) => ((ReadOnlySpan<byte>)array).IsEmpty();
|
||||
|
||||
public static bool IsEmpty(this ReadOnlySpan<byte> span)
|
||||
{
|
||||
if (span == null) throw new ArgumentNullException(nameof(span));
|
||||
|
||||
for (int i = 0; i < span.Length; i++)
|
||||
{
|
||||
if (span[i] != 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public static void XorArrays(Span<byte> transformData, Span<byte> xorData)
|
||||
{
|
||||
int sisdStart = 0;
|
||||
if (Vector.IsHardwareAccelerated)
|
||||
{
|
||||
Span<Vector<byte>> dataVec = MemoryMarshal.Cast<byte, Vector<byte>>(transformData);
|
||||
Span<Vector<byte>> xorVec = MemoryMarshal.Cast<byte, Vector<byte>>(xorData);
|
||||
sisdStart = dataVec.Length * Vector<byte>.Count;
|
||||
|
||||
for (int i = 0; i < dataVec.Length; i++)
|
||||
{
|
||||
dataVec[i] ^= xorVec[i];
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = sisdStart; i < transformData.Length; i++)
|
||||
{
|
||||
transformData[i] ^= xorData[i];
|
||||
}
|
||||
}
|
||||
|
||||
public static void CopyStream(this Stream input, Stream output, long length, IProgressReport progress = null)
|
||||
{
|
||||
const int bufferSize = 0x8000;
|
||||
|
@ -1,117 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// Xts. See <see cref="XtsAes128"/> and <see cref="XtsAes256"/>.
|
||||
/// </summary>
|
||||
public class Xts
|
||||
{
|
||||
private readonly SymmetricAlgorithm _key1;
|
||||
private readonly SymmetricAlgorithm _key2;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new Xts implementation.
|
||||
/// </summary>
|
||||
/// <param name="create">Function to create the implementations</param>
|
||||
/// <param name="key1">Key 1</param>
|
||||
/// <param name="key2">Key 2</param>
|
||||
protected Xts(Func<SymmetricAlgorithm> create, byte[] key1, byte[] key2)
|
||||
{
|
||||
if (create == null)
|
||||
throw new ArgumentNullException("create");
|
||||
if (key1 == null)
|
||||
throw new ArgumentNullException("key1");
|
||||
if (key2 == null)
|
||||
throw new ArgumentNullException("key2");
|
||||
|
||||
_key1 = create();
|
||||
_key2 = create();
|
||||
|
||||
if (key1.Length != key2.Length)
|
||||
throw new ArgumentException("Key lengths don't match");
|
||||
|
||||
//set the key sizes
|
||||
_key1.KeySize = key1.Length * 8;
|
||||
_key2.KeySize = key2.Length * 8;
|
||||
|
||||
//set the keys
|
||||
_key1.Key = key1;
|
||||
_key2.Key = key2;
|
||||
|
||||
//ecb mode
|
||||
_key1.Mode = CipherMode.ECB;
|
||||
_key2.Mode = CipherMode.ECB;
|
||||
|
||||
//no padding - we're always going to be writing full blocks
|
||||
_key1.Padding = PaddingMode.None;
|
||||
_key2.Padding = PaddingMode.None;
|
||||
|
||||
//fixed block size of 128 bits.
|
||||
_key1.BlockSize = 16 * 8;
|
||||
_key2.BlockSize = 16 * 8;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates an xts encryptor
|
||||
/// </summary>
|
||||
public XtsCryptoTransform CreateEncryptor()
|
||||
{
|
||||
return new XtsCryptoTransform(_key1.CreateEncryptor(), _key2.CreateEncryptor(), false);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates an xts decryptor
|
||||
/// </summary>
|
||||
public XtsCryptoTransform CreateDecryptor()
|
||||
{
|
||||
return new XtsCryptoTransform(_key1.CreateDecryptor(), _key2.CreateEncryptor(), true);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Verify that the key is of an expected size of bits
|
||||
/// </summary>
|
||||
/// <param name="expectedSize">Expected size of the key in bits</param>
|
||||
/// <param name="key">The key</param>
|
||||
/// <returns>The key</returns>
|
||||
/// <exception cref="ArgumentNullException">If the key is null</exception>
|
||||
/// <exception cref="ArgumentException">If the key length does not match the expected length</exception>
|
||||
protected static byte[] VerifyKey(int expectedSize, byte[] key)
|
||||
{
|
||||
if (key == null)
|
||||
throw new ArgumentNullException("key");
|
||||
|
||||
if (key.Length * 8 != expectedSize)
|
||||
throw new ArgumentException(string.Format("Expected key length of {0} bits, got {1}", expectedSize, key.Length * 8));
|
||||
|
||||
return key;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// XTS-AES-128 implementation
|
||||
/// </summary>
|
||||
public class XtsAes128 : Xts
|
||||
{
|
||||
private const int KeyLength = 128;
|
||||
private const int KeyByteLength = KeyLength / 8;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance
|
||||
/// </summary>
|
||||
protected XtsAes128(Func<SymmetricAlgorithm> create, byte[] key1, byte[] key2)
|
||||
: base(create, VerifyKey(KeyLength, key1), VerifyKey(KeyLength, key2))
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new implementation
|
||||
/// </summary>
|
||||
/// <param name="key1">First key</param>
|
||||
/// <param name="key2">Second key</param>
|
||||
/// <returns>Xts implementation</returns>
|
||||
/// <remarks>Keys need to be 128 bits long (i.e. 16 bytes)</remarks>
|
||||
public static Xts Create(byte[] key1, byte[] key2)
|
||||
{
|
||||
VerifyKey(KeyLength, key1);
|
||||
VerifyKey(KeyLength, key2);
|
||||
|
||||
return new XtsAes128(Aes.Create, key1, key2);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new implementation
|
||||
/// </summary>
|
||||
/// <param name="key">Key to use</param>
|
||||
/// <returns>Xts implementation</returns>
|
||||
/// <remarks>Key need to be 256 bits long (i.e. 32 bytes)</remarks>
|
||||
public static Xts Create(byte[] key)
|
||||
{
|
||||
VerifyKey(KeyLength * 2, key);
|
||||
|
||||
var key1 = new byte[KeyByteLength];
|
||||
var key2 = new byte[KeyByteLength];
|
||||
|
||||
Buffer.BlockCopy(key, 0, key1, 0, KeyByteLength);
|
||||
Buffer.BlockCopy(key, KeyByteLength, key2, 0, KeyByteLength);
|
||||
|
||||
return new XtsAes128(Aes.Create, key1, key2);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// XTS-AES-256 implementation
|
||||
/// </summary>
|
||||
public class XtsAes256 : Xts
|
||||
{
|
||||
private const int KeyLength = 256;
|
||||
private const int KeyByteLength = KeyLength / 8;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new instance
|
||||
/// </summary>
|
||||
protected XtsAes256(Func<SymmetricAlgorithm> create, byte[] key1, byte[] key2)
|
||||
: base(create, VerifyKey(KeyLength, key1), VerifyKey(KeyLength, key2))
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new implementation
|
||||
/// </summary>
|
||||
/// <param name="key1">First key</param>
|
||||
/// <param name="key2">Second key</param>
|
||||
/// <returns>Xts implementation</returns>
|
||||
/// <remarks>Keys need to be 256 bits long (i.e. 32 bytes)</remarks>
|
||||
public static Xts Create(byte[] key1, byte[] key2)
|
||||
{
|
||||
VerifyKey(KeyLength, key1);
|
||||
VerifyKey(KeyLength, key2);
|
||||
|
||||
return new XtsAes256(Aes.Create, key1, key2);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new implementation
|
||||
/// </summary>
|
||||
/// <param name="key">Key to use</param>
|
||||
/// <returns>Xts implementation</returns>
|
||||
/// <remarks>Keys need to be 512 bits long (i.e. 64 bytes)</remarks>
|
||||
public static Xts Create(byte[] key)
|
||||
{
|
||||
VerifyKey(KeyLength * 2, key);
|
||||
|
||||
var key1 = new byte[KeyByteLength];
|
||||
var key2 = new byte[KeyByteLength];
|
||||
|
||||
Buffer.BlockCopy(key, 0, key1, 0, KeyByteLength);
|
||||
Buffer.BlockCopy(key, KeyByteLength, key2, 0, KeyByteLength);
|
||||
|
||||
return new XtsAes256(Aes.Create, key1, key2);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,231 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System;
|
||||
using System.Security.Cryptography;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// The actual Xts cryptography transform
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The reason that it doesn't implement ICryptoTransform, as the interface is different.
|
||||
///
|
||||
/// Most of the logic was taken from the LibTomCrypt project - http://libtom.org and
|
||||
/// converted to C#
|
||||
/// </remarks>
|
||||
public class XtsCryptoTransform : IDisposable
|
||||
{
|
||||
private readonly byte[] _cc = new byte[16];
|
||||
private readonly bool _decrypting;
|
||||
private readonly ICryptoTransform _key1;
|
||||
private readonly ICryptoTransform _key2;
|
||||
|
||||
private readonly byte[] _pp = new byte[16];
|
||||
private readonly byte[] _t = new byte[16];
|
||||
private readonly byte[] _tweak = new byte[16];
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new transform
|
||||
/// </summary>
|
||||
/// <param name="key1">Transform 1</param>
|
||||
/// <param name="key2">Transform 2</param>
|
||||
/// <param name="decrypting">Is this a decryption transform?</param>
|
||||
public XtsCryptoTransform(ICryptoTransform key1, ICryptoTransform key2, bool decrypting)
|
||||
{
|
||||
if (key1 == null)
|
||||
throw new ArgumentNullException("key1");
|
||||
|
||||
if (key2 == null)
|
||||
throw new ArgumentNullException("key2");
|
||||
|
||||
_key1 = key1;
|
||||
_key2 = key2;
|
||||
_decrypting = decrypting;
|
||||
}
|
||||
|
||||
#region IDisposable Members
|
||||
|
||||
/// <summary>
|
||||
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
|
||||
/// </summary>
|
||||
/// <filterpriority>2</filterpriority>
|
||||
public void Dispose()
|
||||
{
|
||||
_key1.Dispose();
|
||||
_key2.Dispose();
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
/// <summary>
|
||||
/// Transforms a single block.
|
||||
/// </summary>
|
||||
/// <param name="inputBuffer"> The input for which to compute the transform.</param>
|
||||
/// <param name="inputOffset">The offset into the input byte array from which to begin using data.</param>
|
||||
/// <param name="inputCount">The number of bytes in the input byte array to use as data.</param>
|
||||
/// <param name="outputBuffer">The output to which to write the transform.</param>
|
||||
/// <param name="outputOffset">The offset into the output byte array from which to begin writing data.</param>
|
||||
/// <param name="sector">The sector number of the block</param>
|
||||
/// <returns>The number of bytes written.</returns>
|
||||
public int TransformBlock(byte[] inputBuffer, int inputOffset, int inputCount, byte[] outputBuffer, int outputOffset, ulong sector)
|
||||
{
|
||||
//Nintendo Switch uses Little Endian
|
||||
FillArrayFromSectorLittleEndian(_tweak, sector);
|
||||
|
||||
int lim;
|
||||
|
||||
/* get number of blocks */
|
||||
int m = inputCount >> 4;
|
||||
int mo = inputCount & 15;
|
||||
|
||||
/* encrypt the tweak */
|
||||
_key2.TransformBlock(_tweak, 0, _tweak.Length, _t, 0);
|
||||
|
||||
/* for i = 0 to m-2 do */
|
||||
if (mo == 0)
|
||||
lim = m;
|
||||
else
|
||||
lim = m - 1;
|
||||
|
||||
for (int i = 0; i < lim; i++)
|
||||
{
|
||||
TweakCrypt(inputBuffer, inputOffset, outputBuffer, outputOffset, _t);
|
||||
inputOffset += 16;
|
||||
outputOffset += 16;
|
||||
}
|
||||
|
||||
/* if ptlen not divide 16 then */
|
||||
if (mo > 0)
|
||||
{
|
||||
if (_decrypting)
|
||||
{
|
||||
Buffer.BlockCopy(_t, 0, _cc, 0, 16);
|
||||
MultiplyByX(_cc);
|
||||
|
||||
/* CC = tweak encrypt block m-1 */
|
||||
TweakCrypt(inputBuffer, inputOffset, _pp, 0, _cc);
|
||||
|
||||
/* Cm = first ptlen % 16 bytes of CC */
|
||||
int i;
|
||||
for (i = 0; i < mo; i++)
|
||||
{
|
||||
_cc[i] = inputBuffer[16 + i + inputOffset];
|
||||
outputBuffer[16 + i + outputOffset] = _pp[i];
|
||||
}
|
||||
|
||||
for (; i < 16; i++)
|
||||
{
|
||||
_cc[i] = _pp[i];
|
||||
}
|
||||
|
||||
/* Cm-1 = Tweak encrypt PP */
|
||||
TweakCrypt(_cc, 0, outputBuffer, outputOffset, _t);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* CC = tweak encrypt block m-1 */
|
||||
TweakCrypt(inputBuffer, inputOffset, _cc, 0, _t);
|
||||
|
||||
/* Cm = first ptlen % 16 bytes of CC */
|
||||
int i;
|
||||
for (i = 0; i < mo; i++)
|
||||
{
|
||||
_pp[i] = inputBuffer[16 + i + inputOffset];
|
||||
outputBuffer[16 + i + outputOffset] = _cc[i];
|
||||
}
|
||||
|
||||
for (; i < 16; i++)
|
||||
{
|
||||
_pp[i] = _cc[i];
|
||||
}
|
||||
|
||||
/* Cm-1 = Tweak encrypt PP */
|
||||
TweakCrypt(_pp, 0, outputBuffer, outputOffset, _t);
|
||||
}
|
||||
}
|
||||
|
||||
return inputCount;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Fills a byte array from a sector number (little endian)
|
||||
/// </summary>
|
||||
/// <param name="value">The destination</param>
|
||||
/// <param name="sector">The sector number</param>
|
||||
private static void FillArrayFromSectorLittleEndian(byte[] value, ulong sector)
|
||||
{
|
||||
value[0x8] = (byte)((sector >> 56) & 255);
|
||||
value[0x9] = (byte)((sector >> 48) & 255);
|
||||
value[0xA] = (byte)((sector >> 40) & 255);
|
||||
value[0xB] = (byte)((sector >> 32) & 255);
|
||||
value[0xC] = (byte)((sector >> 24) & 255);
|
||||
value[0xD] = (byte)((sector >> 16) & 255);
|
||||
value[0xE] = (byte)((sector >> 8) & 255);
|
||||
value[0xF] = (byte)(sector & 255);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Performs the Xts TweakCrypt operation
|
||||
/// </summary>
|
||||
private void TweakCrypt(byte[] inputBuffer, int inputOffset, byte[] outputBuffer, int outputOffset, byte[] t)
|
||||
{
|
||||
for (int x = 0; x < 16; x++)
|
||||
{
|
||||
outputBuffer[x + outputOffset] = (byte)(inputBuffer[x + inputOffset] ^ t[x]);
|
||||
}
|
||||
|
||||
_key1.TransformBlock(outputBuffer, outputOffset, 16, outputBuffer, outputOffset);
|
||||
|
||||
for (int x = 0; x < 16; x++)
|
||||
{
|
||||
outputBuffer[x + outputOffset] = (byte)(outputBuffer[x + outputOffset] ^ t[x]);
|
||||
}
|
||||
|
||||
MultiplyByX(t);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Multiply by x
|
||||
/// </summary>
|
||||
/// <param name="i">The value to multiply by x (LFSR shift)</param>
|
||||
private static void MultiplyByX(byte[] i)
|
||||
{
|
||||
byte t = 0, tt = 0;
|
||||
|
||||
for (int x = 0; x < 16; x++)
|
||||
{
|
||||
tt = (byte)(i[x] >> 7);
|
||||
i[x] = (byte)(((i[x] << 1) | t) & 0xFF);
|
||||
t = tt;
|
||||
}
|
||||
|
||||
if (tt > 0)
|
||||
i[0] ^= 0x87;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,156 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// Xts sector-based
|
||||
/// </summary>
|
||||
public class XtsSectorStream : SectorStream
|
||||
{
|
||||
/// <summary>
|
||||
/// The default sector size
|
||||
/// </summary>
|
||||
public const int DefaultSectorSize = 512;
|
||||
|
||||
private readonly byte[] _tempBuffer;
|
||||
private readonly Xts _xts;
|
||||
private XtsCryptoTransform _decryptor;
|
||||
private XtsCryptoTransform _encryptor;
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream with the default sector size
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">The xts transform</param>
|
||||
public XtsSectorStream(Stream baseStream, Xts xts)
|
||||
: this(baseStream, xts, DefaultSectorSize)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">The xts transform</param>
|
||||
/// <param name="sectorSize">Sector size</param>
|
||||
public XtsSectorStream(Stream baseStream, Xts xts, int sectorSize)
|
||||
: this(baseStream, xts, sectorSize, 0)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">The xts transform</param>
|
||||
/// <param name="sectorSize">Sector size</param>
|
||||
/// <param name="offset">Offset to start counting sectors</param>
|
||||
public XtsSectorStream(Stream baseStream, Xts xts, int sectorSize, long offset)
|
||||
: base(baseStream, sectorSize, 1, offset)
|
||||
{
|
||||
_xts = xts;
|
||||
_tempBuffer = new byte[sectorSize];
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Releases the unmanaged resources used by the <see cref="T:System.IO.Stream"/> and optionally releases the managed resources.
|
||||
/// </summary>
|
||||
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources.</param>
|
||||
protected override void Dispose(bool disposing)
|
||||
{
|
||||
base.Dispose(disposing);
|
||||
|
||||
if (_encryptor != null)
|
||||
_encryptor.Dispose();
|
||||
|
||||
if (_decryptor != null)
|
||||
_decryptor.Dispose();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Writes a sequence of bytes to the current stream and advances the current position within this stream by the number of bytes written.
|
||||
/// </summary>
|
||||
/// <param name="buffer">An array of bytes. This method copies <paramref name="count"/> bytes from <paramref name="buffer"/> to the current stream.</param>
|
||||
/// <param name="offset">The zero-based byte offset in <paramref name="buffer"/> at which to begin copying bytes to the current stream.</param>
|
||||
/// <param name="count">The number of bytes to be written to the current stream.</param>
|
||||
public override void Write(byte[] buffer, int offset, int count)
|
||||
{
|
||||
ValidateSize(count);
|
||||
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
//get the current sector
|
||||
long currentSector = CurrentSector;
|
||||
|
||||
if (_encryptor == null)
|
||||
_encryptor = _xts.CreateEncryptor();
|
||||
|
||||
//encrypt the sector
|
||||
int transformedCount = _encryptor.TransformBlock(buffer, offset, count, _tempBuffer, 0, (ulong) currentSector);
|
||||
|
||||
//Console.WriteLine("Encrypting sector {0}", currentSector);
|
||||
|
||||
//write it to the base stream
|
||||
base.Write(_tempBuffer, 0, transformedCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads a sequence of bytes from the current stream and advances the position within the stream by the number of bytes read.
|
||||
/// </summary>
|
||||
/// <returns>The total number of bytes read into the buffer. This can be less than the number of bytes requested if that many bytes are not currently available, or zero (0) if the end of the stream has been reached.</returns>
|
||||
/// <param name="buffer">An array of bytes. When this method returns, the buffer contains the specified byte array with the values between <paramref name="offset"/> and (<paramref name="offset"/> + <paramref name="count"/> - 1) replaced by the bytes read from the current source. </param>
|
||||
/// <param name="offset">The zero-based byte offset in <paramref name="buffer"/> at which to begin storing the data read from the current stream.</param>
|
||||
/// <param name="count">The maximum number of bytes to be read from the current stream.</param>
|
||||
public override int Read(byte[] buffer, int offset, int count)
|
||||
{
|
||||
ValidateSize(count);
|
||||
|
||||
//get the current sector
|
||||
long currentSector = CurrentSector;
|
||||
|
||||
//read the sector from the base stream
|
||||
int ret = base.Read(_tempBuffer, 0, count);
|
||||
|
||||
if (ret == 0)
|
||||
return 0;
|
||||
|
||||
if (_decryptor == null)
|
||||
_decryptor = _xts.CreateDecryptor();
|
||||
|
||||
//decrypt the sector
|
||||
int retV = _decryptor.TransformBlock(_tempBuffer, 0, ret, buffer, offset, (ulong) currentSector);
|
||||
|
||||
//Console.WriteLine("Decrypting sector {0} == {1} bytes", currentSector, retV);
|
||||
|
||||
return retV;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
// Copyright (c) 2010 Gareth Lennox (garethl@dwakn.com)
|
||||
// All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Gareth Lennox nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from this
|
||||
// software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
using System.IO;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace LibHac.XTSSharp
|
||||
{
|
||||
/// <summary>
|
||||
/// A random access, xts encrypted stream
|
||||
/// </summary>
|
||||
public class XtsStream : RandomAccessSectorStream
|
||||
{
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">Xts implementation to use</param>
|
||||
public XtsStream(Stream baseStream, Xts xts)
|
||||
: this(baseStream, xts, XtsSectorStream.DefaultSectorSize)
|
||||
{
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">Xts implementation to use</param>
|
||||
/// <param name="sectorSize">Sector size</param>
|
||||
public XtsStream(Stream baseStream, Xts xts, int sectorSize)
|
||||
: base(new XtsSectorStream(baseStream, xts, sectorSize), false)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Creates a new stream
|
||||
/// </summary>
|
||||
/// <param name="baseStream">The base stream</param>
|
||||
/// <param name="xts">Xts implementation to use</param>
|
||||
/// <param name="sectorSize">Sector size</param>
|
||||
/// <param name="offset">Offset to start counting sectors</param>
|
||||
public XtsStream(Stream baseStream, Xts xts, int sectorSize, long offset)
|
||||
: base(new XtsSectorStream(baseStream, xts, sectorSize, offset), false)
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
@ -1,7 +1,6 @@
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using LibHac.Streams;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace LibHac
|
||||
{
|
||||
@ -23,29 +22,29 @@ namespace LibHac
|
||||
|
||||
public List<XciPartition> Partitions { get; } = new List<XciPartition>();
|
||||
|
||||
public Xci(Keyset keyset, Stream stream)
|
||||
public Xci(Keyset keyset, IStorage storage)
|
||||
{
|
||||
Header = new XciHeader(keyset, stream);
|
||||
var hfs0Stream = new SubStream(stream, Header.PartitionFsHeaderAddress);
|
||||
Header = new XciHeader(keyset, storage.AsStream());
|
||||
IStorage hfs0Stream = storage.Slice(Header.PartitionFsHeaderAddress);
|
||||
|
||||
RootPartition = new XciPartition(hfs0Stream)
|
||||
{
|
||||
Name = RootPartitionName,
|
||||
Offset = Header.PartitionFsHeaderAddress,
|
||||
HashValidity = Header.PartitionFsHeaderValidity
|
||||
HashValidity = Header.PartitionFsHeaderValidity
|
||||
};
|
||||
|
||||
Partitions.Add(RootPartition);
|
||||
|
||||
foreach (PfsFileEntry file in RootPartition.Files)
|
||||
{
|
||||
Stream partitionStream = RootPartition.OpenFile(file);
|
||||
IStorage partitionStorage = RootPartition.OpenFile(file);
|
||||
|
||||
var partition = new XciPartition(partitionStream)
|
||||
var partition = new XciPartition(partitionStorage)
|
||||
{
|
||||
Name = file.Name,
|
||||
Offset = Header.PartitionFsHeaderAddress + RootPartition.HeaderSize + file.Offset,
|
||||
HashValidity = file.HashValidity
|
||||
HashValidity = file.HashValidity
|
||||
};
|
||||
|
||||
Partitions.Add(partition);
|
||||
@ -64,6 +63,6 @@ namespace LibHac
|
||||
public long Offset { get; internal set; }
|
||||
public Validity HashValidity { get; set; } = Validity.Unchecked;
|
||||
|
||||
public XciPartition(Stream stream) : base(stream) { }
|
||||
public XciPartition(IStorage storage) : base(storage) { }
|
||||
}
|
||||
}
|
||||
|
@ -68,8 +68,8 @@ namespace LibHac
|
||||
|
||||
public XciHeader(Keyset keyset, Stream stream)
|
||||
{
|
||||
|
||||
using (var reader = new BinaryReader(stream, Encoding.Default, true)) {
|
||||
using (var reader = new BinaryReader(stream, Encoding.Default, true))
|
||||
{
|
||||
|
||||
Signature = reader.ReadBytes(SignatureSize);
|
||||
Magic = reader.ReadAscii(4);
|
||||
@ -82,14 +82,7 @@ namespace LibHac
|
||||
byte[] sigData = reader.ReadBytes(SignatureSize);
|
||||
reader.BaseStream.Position = SignatureSize + 4;
|
||||
|
||||
if (Crypto.Rsa2048Pkcs1Verify(sigData, Signature, _xciHeaderPubk))
|
||||
{
|
||||
SignatureValidity = Validity.Valid;
|
||||
}
|
||||
else
|
||||
{
|
||||
SignatureValidity = Validity.Invalid;
|
||||
}
|
||||
SignatureValidity = Crypto.Rsa2048Pkcs1Verify(sigData, Signature, _xciHeaderPubk);
|
||||
|
||||
RomAreaStartPage = reader.ReadInt32();
|
||||
BackupAreaStartPage = reader.ReadInt32();
|
||||
@ -112,13 +105,15 @@ namespace LibHac
|
||||
SelKey = reader.ReadInt32();
|
||||
LimAreaPage = reader.ReadInt32();
|
||||
|
||||
if (!keyset.XciHeaderKey.IsEmpty()) {
|
||||
if (!keyset.XciHeaderKey.IsEmpty())
|
||||
{
|
||||
|
||||
byte[] encHeader = reader.ReadBytes(EncryptedHeaderSize);
|
||||
var decHeader = new byte[EncryptedHeaderSize];
|
||||
Crypto.DecryptCbc(keyset.XciHeaderKey, AesCbcIv, encHeader, decHeader, EncryptedHeaderSize);
|
||||
|
||||
using (var decreader = new BinaryReader(new MemoryStream(decHeader))) {
|
||||
using (var decreader = new BinaryReader(new MemoryStream(decHeader)))
|
||||
{
|
||||
FwVersion = decreader.ReadUInt64();
|
||||
AccCtrl1 = (CardClockRate)decreader.ReadInt32();
|
||||
Wait1TimeRead = decreader.ReadInt32();
|
||||
|
@ -3,8 +3,9 @@ using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
using LibHac.IO.Save;
|
||||
using LibHac.Nand;
|
||||
using LibHac.Save;
|
||||
|
||||
namespace NandReader
|
||||
{
|
||||
@ -102,9 +103,9 @@ namespace NandReader
|
||||
private static List<Ticket> ReadTickets(Keyset keyset, Stream savefile)
|
||||
{
|
||||
var tickets = new List<Ticket>();
|
||||
var save = new Savefile(keyset, savefile, IntegrityCheckLevel.None);
|
||||
var ticketList = new BinaryReader(save.OpenFile("/ticket_list.bin"));
|
||||
var ticketFile = new BinaryReader(save.OpenFile("/ticket.bin"));
|
||||
var save = new Savefile(keyset, savefile.AsStorage(), IntegrityCheckLevel.None, true);
|
||||
var ticketList = new BinaryReader(save.OpenFile("/ticket_list.bin").AsStream());
|
||||
var ticketFile = new BinaryReader(save.OpenFile("/ticket.bin").AsStream());
|
||||
|
||||
ulong titleId = ticketList.ReadUInt64();
|
||||
while (titleId != ulong.MaxValue)
|
||||
|
@ -6,9 +6,9 @@ using System.Windows.Input;
|
||||
using GalaSoft.MvvmLight;
|
||||
using GalaSoft.MvvmLight.Command;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
using LibHac.IO.Save;
|
||||
using LibHac.Nand;
|
||||
using LibHac.Save;
|
||||
using LibHac.Streams;
|
||||
|
||||
namespace NandReaderGui.ViewModel
|
||||
{
|
||||
@ -32,6 +32,7 @@ namespace NandReaderGui.ViewModel
|
||||
info.PhysicalName = (string)drive.GetPropertyValue("Name");
|
||||
info.Name = (string)drive.GetPropertyValue("Caption");
|
||||
info.Model = (string)drive.GetPropertyValue("Model");
|
||||
//todo Why is Windows returning small sizes? https://stackoverflow.com/questions/15051660
|
||||
info.Length = (long)((ulong)drive.GetPropertyValue("Size"));
|
||||
info.SectorSize = (int)((uint)drive.GetPropertyValue("BytesPerSector"));
|
||||
info.DisplaySize = Util.GetBytesReadable((long)((ulong)drive.GetPropertyValue("Size")));
|
||||
@ -44,7 +45,9 @@ namespace NandReaderGui.ViewModel
|
||||
public void Open()
|
||||
{
|
||||
DiskInfo disk = SelectedDisk;
|
||||
var stream = new RandomAccessSectorStream(new SectorStream(new DeviceStream(disk.PhysicalName, disk.Length), disk.SectorSize * 100));
|
||||
var storage = new CachedStorage(new DeviceStream(disk.PhysicalName, disk.Length).AsStorage(), disk.SectorSize * 100, 4, true);
|
||||
storage.SetReadOnly();
|
||||
Stream stream = storage.AsStream();
|
||||
|
||||
Keyset keyset = OpenKeyset();
|
||||
var nand = new Nand(stream, keyset);
|
||||
@ -84,9 +87,9 @@ namespace NandReaderGui.ViewModel
|
||||
private static List<Ticket> ReadTickets(Keyset keyset, Stream savefile)
|
||||
{
|
||||
var tickets = new List<Ticket>();
|
||||
var save = new Savefile(keyset, savefile, IntegrityCheckLevel.None);
|
||||
var ticketList = new BinaryReader(save.OpenFile("/ticket_list.bin"));
|
||||
var ticketFile = new BinaryReader(save.OpenFile("/ticket.bin"));
|
||||
var save = new Savefile(keyset, savefile.AsStorage(), IntegrityCheckLevel.None, true);
|
||||
var ticketList = new BinaryReader(save.OpenFile("/ticket_list.bin").AsStream());
|
||||
var ticketFile = new BinaryReader(save.OpenFile("/ticket.bin").AsStream());
|
||||
|
||||
ulong titleId = ticketList.ReadUInt64();
|
||||
while (titleId != ulong.MaxValue)
|
||||
|
@ -1,8 +1,8 @@
|
||||
# LibHac
|
||||
|
||||
LibHac is a .NET or .NET Core library for opening, decrypting and extracting common content file formats used by the Nintendo Switch.
|
||||
LibHac is a .NET and .NET Core library for opening, decrypting and extracting common content file formats used by the Nintendo Switch.
|
||||
|
||||
All content is imported and exported as .NET streams. This means that reading nested file types and encryptions can easily be done by linking different file readers together.
|
||||
Most content is imported and exported using a standard `IStorage` interface. This means that reading nested file types and encryptions can easily be done by linking different file readers together.
|
||||
For example, the files from a title stored on the external SD card can be read or extracted in this way.
|
||||
`NAX0 Reader` -> `NCA Reader` -> `RomFS Reader` -> `Individual Files`
|
||||
|
||||
|
@ -32,6 +32,7 @@ namespace hactoolnet
|
||||
new CliOption("debugoutdir", 1, (o, a) => o.DebugOutDir = a[0]),
|
||||
new CliOption("savedir", 1, (o, a) => o.SaveOutDir = a[0]),
|
||||
new CliOption("outdir", 1, (o, a) => o.OutDir = a[0]),
|
||||
new CliOption("plaintext", 1, (o, a) => o.PlaintextOut = a[0]),
|
||||
new CliOption("nspout", 1, (o, a) => o.NspOut = a[0]),
|
||||
new CliOption("sdseed", 1, (o, a) => o.SdSeed = a[0]),
|
||||
new CliOption("sdpath", 1, (o, a) => o.SdPath = a[0]),
|
||||
@ -47,6 +48,7 @@ namespace hactoolnet
|
||||
new CliOption("listfiles", 0, (o, a) => o.ListFiles = true),
|
||||
new CliOption("sign", 0, (o, a) => o.SignSave = true),
|
||||
new CliOption("title", 1, (o, a) => o.TitleId = ParseTitleId(a[0])),
|
||||
new CliOption("bench", 1, (o, a) => o.BenchType = a[0]),
|
||||
};
|
||||
|
||||
public static Options Parse(string[] args)
|
||||
@ -99,7 +101,7 @@ namespace hactoolnet
|
||||
i += option.ArgsNeeded;
|
||||
}
|
||||
|
||||
if (!inputSpecified && options.InFileType != FileType.Keygen && !options.RunCustom)
|
||||
if (!inputSpecified && options.InFileType != FileType.Keygen && options.InFileType != FileType.Bench && !options.RunCustom)
|
||||
{
|
||||
PrintWithUsage("Input file must be specified");
|
||||
return null;
|
||||
|
@ -1,4 +1,5 @@
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace hactoolnet
|
||||
{
|
||||
@ -22,6 +23,7 @@ namespace hactoolnet
|
||||
public string DebugOutDir;
|
||||
public string SaveOutDir;
|
||||
public string OutDir;
|
||||
public string PlaintextOut;
|
||||
public string SdSeed;
|
||||
public string NspOut;
|
||||
public string SdPath;
|
||||
@ -37,6 +39,7 @@ namespace hactoolnet
|
||||
public bool ListFiles;
|
||||
public bool SignSave;
|
||||
public ulong TitleId;
|
||||
public string BenchType;
|
||||
|
||||
public IntegrityCheckLevel IntegrityLevel
|
||||
{
|
||||
@ -63,7 +66,8 @@ namespace hactoolnet
|
||||
Pk11,
|
||||
Pk21,
|
||||
Kip1,
|
||||
Ini1
|
||||
Ini1,
|
||||
Bench
|
||||
}
|
||||
|
||||
internal class Context
|
||||
|
@ -1,6 +1,7 @@
|
||||
using System;
|
||||
using System.Text;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace hactoolnet
|
||||
{
|
||||
@ -28,24 +29,24 @@ namespace hactoolnet
|
||||
}
|
||||
}
|
||||
|
||||
public static void PrintIvfcHash(StringBuilder sb, int colLen, int indentSize, IvfcHeader ivfcInfo, IntegrityStreamType type)
|
||||
public static void PrintIvfcHash(StringBuilder sb, int colLen, int indentSize, IvfcHeader ivfcInfo, IntegrityStorageType type)
|
||||
{
|
||||
string prefix = new string(' ', indentSize);
|
||||
string prefix2 = new string(' ', indentSize + 4);
|
||||
|
||||
if (type == IntegrityStreamType.RomFs)
|
||||
if (type == IntegrityStorageType.RomFs)
|
||||
PrintItem(sb, colLen, $"{prefix}Master Hash{ivfcInfo.LevelHeaders[0].HashValidity.GetValidityString()}:", ivfcInfo.MasterHash);
|
||||
|
||||
PrintItem(sb, colLen, $"{prefix}Magic:", ivfcInfo.Magic);
|
||||
PrintItem(sb, colLen, $"{prefix}Version:", ivfcInfo.Version);
|
||||
|
||||
if (type == IntegrityStreamType.Save)
|
||||
if (type == IntegrityStorageType.Save)
|
||||
PrintItem(sb, colLen, $"{prefix}Salt Seed:", ivfcInfo.SaltSource);
|
||||
|
||||
int levelCount = Math.Max(ivfcInfo.NumLevels - 1, 0);
|
||||
if (type == IntegrityStreamType.Save) levelCount = 4;
|
||||
if (type == IntegrityStorageType.Save) levelCount = 4;
|
||||
|
||||
int offsetLen = type == IntegrityStreamType.Save ? 16 : 12;
|
||||
int offsetLen = type == IntegrityStorageType.Save ? 16 : 12;
|
||||
|
||||
for (int i = 0; i < levelCount; i++)
|
||||
{
|
||||
@ -54,12 +55,12 @@ namespace hactoolnet
|
||||
|
||||
if (i != 0)
|
||||
{
|
||||
hashOffset = ivfcInfo.LevelHeaders[i - 1].LogicalOffset;
|
||||
hashOffset = ivfcInfo.LevelHeaders[i - 1].Offset;
|
||||
}
|
||||
|
||||
sb.AppendLine($"{prefix}Level {i}{level.HashValidity.GetValidityString()}:");
|
||||
PrintItem(sb, colLen, $"{prefix2}Data Offset:", $"0x{level.LogicalOffset.ToString($"x{offsetLen}")}");
|
||||
PrintItem(sb, colLen, $"{prefix2}Data Size:", $"0x{level.HashDataSize.ToString($"x{offsetLen}")}");
|
||||
PrintItem(sb, colLen, $"{prefix2}Data Offset:", $"0x{level.Offset.ToString($"x{offsetLen}")}");
|
||||
PrintItem(sb, colLen, $"{prefix2}Data Size:", $"0x{level.Size.ToString($"x{offsetLen}")}");
|
||||
PrintItem(sb, colLen, $"{prefix2}Hash Offset:", $"0x{hashOffset.ToString($"x{offsetLen}")}");
|
||||
PrintItem(sb, colLen, $"{prefix2}Hash BlockSize:", $"0x{1 << level.BlockSizePower:x8}");
|
||||
}
|
||||
|
87
hactoolnet/ProcessBench.cs
Normal file
87
hactoolnet/ProcessBench.cs
Normal file
@ -0,0 +1,87 @@
|
||||
using System.Diagnostics;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace hactoolnet
|
||||
{
|
||||
internal static class ProcessBench
|
||||
{
|
||||
private const int Size = 1024 * 1024 * 10;
|
||||
private const int Iterations = 100;
|
||||
|
||||
private static void CopyBenchmark(IStorage src, IStorage dst, int iterations, string label, IProgressReport logger)
|
||||
{
|
||||
// Warmup
|
||||
src.CopyTo(dst);
|
||||
|
||||
logger.SetTotal(iterations);
|
||||
|
||||
Stopwatch encryptWatch = Stopwatch.StartNew();
|
||||
for (int i = 0; i < iterations; i++)
|
||||
{
|
||||
src.CopyTo(dst);
|
||||
logger.ReportAdd(1);
|
||||
}
|
||||
encryptWatch.Stop();
|
||||
logger.SetTotal(0);
|
||||
|
||||
string rate = Util.GetBytesReadable((long)(src.Length * iterations / encryptWatch.Elapsed.TotalSeconds));
|
||||
logger.LogMessage($"{label}{rate}/s");
|
||||
}
|
||||
|
||||
public static void Process(Context ctx)
|
||||
{
|
||||
switch (ctx.Options.BenchType?.ToLower())
|
||||
{
|
||||
case "aesctr":
|
||||
{
|
||||
|
||||
IStorage decStorage = new MemoryStorage(new byte[Size]);
|
||||
IStorage encStorage = new Aes128CtrStorage(new MemoryStorage(new byte[Size]), new byte[0x10], new byte[0x10], true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "MemoryStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "MemoryStorage Decrypt: ", ctx.Logger);
|
||||
|
||||
decStorage = new NullStorage(Size);
|
||||
encStorage = new Aes128CtrStorage(new NullStorage(Size), new byte[0x10], new byte[0x10], true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "NullStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "NullStorage Decrypt: ", ctx.Logger);
|
||||
|
||||
decStorage = new MemoryStorage(new byte[Size]);
|
||||
encStorage = new CachedStorage(new Aes128CtrStorage(new MemoryStorage(new byte[Size]), new byte[0x10], new byte[0x10], true), 0x4000, 4, true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "CachedStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "CachedStorage Decrypt: ", ctx.Logger);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case "aesxts":
|
||||
{
|
||||
IStorage decStorage = new MemoryStorage(new byte[Size]);
|
||||
IStorage encStorage = new Aes128XtsStorage(new MemoryStorage(new byte[Size]), new byte[0x20], 81920, true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "MemoryStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "MemoryStorage Decrypt: ", ctx.Logger);
|
||||
|
||||
decStorage = new NullStorage(Size);
|
||||
encStorage = new Aes128XtsStorage(new NullStorage(Size), new byte[0x20], 81920, true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "NullStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "NullStorage Decrypt: ", ctx.Logger);
|
||||
|
||||
decStorage = new MemoryStorage(new byte[Size]);
|
||||
encStorage = new CachedStorage(new Aes128XtsStorage(new MemoryStorage(new byte[Size]), new byte[0x20], 0x4000, true), 4, true);
|
||||
|
||||
CopyBenchmark(decStorage, encStorage, Iterations, "CachedStorage Encrypt: ", ctx.Logger);
|
||||
CopyBenchmark(encStorage, decStorage, Iterations, "CachedStorage Decrypt: ", ctx.Logger);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
ctx.Logger.LogMessage("Unknown benchmark type.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,5 +1,6 @@
|
||||
using System.IO;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
|
||||
namespace hactoolnet
|
||||
{
|
||||
@ -9,7 +10,7 @@ namespace hactoolnet
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
{
|
||||
var kip = new Kip(file);
|
||||
var kip = new Kip(file.AsStorage());
|
||||
kip.OpenRawFile();
|
||||
}
|
||||
}
|
||||
@ -18,7 +19,7 @@ namespace hactoolnet
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
{
|
||||
var ini1 = new Ini1(file);
|
||||
var ini1 = new Ini1(file.AsStorage());
|
||||
|
||||
string outDir = ctx.Options.OutDir;
|
||||
|
||||
|
@ -2,6 +2,7 @@
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
using static hactoolnet.Print;
|
||||
|
||||
namespace hactoolnet
|
||||
@ -10,14 +11,15 @@ namespace hactoolnet
|
||||
{
|
||||
public static void Process(Context ctx)
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
using (var file = new StreamStorage(new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read), false))
|
||||
{
|
||||
var nca = new Nca(ctx.Keyset, file, false);
|
||||
nca.ValidateMasterHashes();
|
||||
nca.ParseNpdm();
|
||||
|
||||
if (ctx.Options.BaseNca != null)
|
||||
{
|
||||
var baseFile = new FileStream(ctx.Options.BaseNca, FileMode.Open, FileAccess.Read);
|
||||
var baseFile = new StreamStorage(new FileStream(ctx.Options.BaseNca, FileMode.Open, FileAccess.Read), false);
|
||||
var baseNca = new Nca(ctx.Keyset, baseFile, false);
|
||||
nca.SetBaseNca(baseNca);
|
||||
}
|
||||
@ -42,7 +44,7 @@ namespace hactoolnet
|
||||
|
||||
if (ctx.Options.ListRomFs && nca.Sections[1] != null)
|
||||
{
|
||||
var romfs = new Romfs(nca.OpenSection(1, false, ctx.Options.IntegrityLevel));
|
||||
var romfs = new Romfs(nca.OpenSection(1, false, ctx.Options.IntegrityLevel, true));
|
||||
|
||||
foreach (RomfsFile romfsFile in romfs.Files)
|
||||
{
|
||||
@ -73,7 +75,7 @@ namespace hactoolnet
|
||||
|
||||
if (ctx.Options.RomfsOutDir != null)
|
||||
{
|
||||
var romfs = new Romfs(nca.OpenSection(section.SectionNum, false, ctx.Options.IntegrityLevel));
|
||||
var romfs = new Romfs(nca.OpenSection(section.SectionNum, false, ctx.Options.IntegrityLevel, true));
|
||||
romfs.Extract(ctx.Options.RomfsOutDir, ctx.Logger);
|
||||
}
|
||||
}
|
||||
@ -105,6 +107,11 @@ namespace hactoolnet
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx.Options.PlaintextOut != null)
|
||||
{
|
||||
nca.OpenDecryptedNca().WriteAllBytes(ctx.Options.PlaintextOut, ctx.Logger);
|
||||
}
|
||||
|
||||
ctx.Logger.LogMessage(nca.Print());
|
||||
}
|
||||
}
|
||||
@ -117,8 +124,8 @@ namespace hactoolnet
|
||||
|
||||
sb.AppendLine("NCA:");
|
||||
PrintItem(sb, colLen, "Magic:", nca.Header.Magic);
|
||||
PrintItem(sb, colLen, "Fixed-Key Signature:", nca.Header.Signature1);
|
||||
PrintItem(sb, colLen, "NPDM Signature:", nca.Header.Signature2);
|
||||
PrintItem(sb, colLen, $"Fixed-Key Signature{nca.Header.FixedSigValidity.GetValidityString()}:", nca.Header.Signature1);
|
||||
PrintItem(sb, colLen, $"NPDM Signature{nca.Header.NpdmSigValidity.GetValidityString()}:", nca.Header.Signature2);
|
||||
PrintItem(sb, colLen, "Content Size:", $"0x{nca.Header.NcaSize:x12}");
|
||||
PrintItem(sb, colLen, "TitleID:", $"{nca.Header.TitleId:X16}");
|
||||
PrintItem(sb, colLen, "SDK Version:", nca.Header.SdkVersion);
|
||||
@ -174,7 +181,7 @@ namespace hactoolnet
|
||||
PrintSha256Hash(sect);
|
||||
break;
|
||||
case NcaHashType.Ivfc:
|
||||
PrintIvfcHash(sb, colLen, 8, sect.Header.IvfcInfo, IntegrityStreamType.RomFs);
|
||||
PrintIvfcHash(sb, colLen, 8, sect.Header.IvfcInfo, IntegrityStorageType.RomFs);
|
||||
break;
|
||||
default:
|
||||
sb.AppendLine(" Unknown/invalid superblock!");
|
||||
|
@ -2,6 +2,7 @@
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
using static hactoolnet.Print;
|
||||
|
||||
namespace hactoolnet
|
||||
@ -12,7 +13,7 @@ namespace hactoolnet
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
{
|
||||
Pfs pfs = new Pfs(file);
|
||||
Pfs pfs = new Pfs(file.AsStorage());
|
||||
ctx.Logger.LogMessage(pfs.Print());
|
||||
|
||||
if (ctx.Options.OutDir != null)
|
||||
@ -68,7 +69,7 @@ namespace hactoolnet
|
||||
|
||||
foreach (Nca nca in title.Ncas)
|
||||
{
|
||||
builder.AddFile(nca.Filename, nca.GetStream());
|
||||
builder.AddFile(nca.Filename, nca.GetStorage().AsStream());
|
||||
}
|
||||
|
||||
var ticket = new Ticket
|
||||
|
@ -1,6 +1,7 @@
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using LibHac;
|
||||
using LibHac.IO;
|
||||
using static hactoolnet.Print;
|
||||
|
||||
namespace hactoolnet
|
||||
@ -11,7 +12,7 @@ namespace hactoolnet
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
{
|
||||
var package1 = new Package1(ctx.Keyset, file);
|
||||
var package1 = new Package1(ctx.Keyset, file.AsStorage());
|
||||
string outDir = ctx.Options.OutDir;
|
||||
|
||||
if (outDir != null)
|
||||
@ -21,19 +22,14 @@ namespace hactoolnet
|
||||
package1.Pk11.OpenWarmboot().WriteAllBytes(Path.Combine(outDir, "Warmboot.bin"), ctx.Logger);
|
||||
package1.Pk11.OpenNxBootloader().WriteAllBytes(Path.Combine(outDir, "NX_Bootloader.bin"), ctx.Logger);
|
||||
package1.Pk11.OpenSecureMonitor().WriteAllBytes(Path.Combine(outDir, "Secure_Monitor.bin"), ctx.Logger);
|
||||
|
||||
using (var decFile = new FileStream(Path.Combine(outDir, "Decrypted.bin"), FileMode.Create))
|
||||
{
|
||||
package1.OpenPackage1Ldr().CopyTo(decFile);
|
||||
package1.Pk11.OpenDecryptedPk11().CopyTo(decFile);
|
||||
}
|
||||
package1.OpenDecryptedPackage().WriteAllBytes(Path.Combine(outDir, "Decrypted.bin"), ctx.Logger);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static void ProcessPk21(Context ctx)
|
||||
{
|
||||
using (var file = new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read))
|
||||
using (var file = new CachedStorage(new FileStream(ctx.Options.InFile, FileMode.Open, FileAccess.Read).AsStorage(), 0x4000, 4, false))
|
||||
{
|
||||
var package2 = new Package2(ctx.Keyset, file);
|
||||
|
||||
@ -47,14 +43,7 @@ namespace hactoolnet
|
||||
|
||||
package2.OpenKernel().WriteAllBytes(Path.Combine(outDir, "Kernel.bin"), ctx.Logger);
|
||||
package2.OpenIni1().WriteAllBytes(Path.Combine(outDir, "INI1.bin"), ctx.Logger);
|
||||
|
||||
using (var decFile = new FileStream(Path.Combine(outDir, "Decrypted.bin"), FileMode.Create))
|
||||
{
|
||||
package2.OpenHeaderPart1().CopyTo(decFile);
|
||||
package2.OpenHeaderPart2().CopyTo(decFile);
|
||||
package2.OpenKernel().CopyTo(decFile);
|
||||
package2.OpenIni1().CopyTo(decFile);
|
||||
}
|
||||
package2.OpenDecryptedPackage().WriteAllBytes(Path.Combine(outDir, "Decrypted.bin"), ctx.Logger);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -68,7 +57,7 @@ namespace hactoolnet
|
||||
sb.AppendLine();
|
||||
|
||||
sb.AppendLine("PK21:");
|
||||
PrintItem(sb, colLen, "Signature:", package2.Header.Signature);
|
||||
PrintItem(sb, colLen, $"Signature{package2.Header.SignatureValidity.GetValidityString()}:", package2.Header.Signature);
|
||||
PrintItem(sb, colLen, "Header Version:", $"{package2.HeaderVersion:x2}");
|
||||
|
||||
for (int i = 0; i < 3; i++)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user