def organizeNcas(dir):
    """Sort loose .nca files in *dir* into per-titleId subdirectories.

    Meta NCAs are renamed to <basename>.cnmt.nca, everything else to
    <basename>.nca.  Errors on individual files are logged and skipped.
    """
    files = [f for f in os.listdir(dir) if f.endswith('.nca')]
    for file in files:
        try:
            path = os.path.join(dir, file)
            f = Fs.Nca()
            f.open(path, 'r+b')
            # Read everything we need from the header BEFORE closing;
            # the old code accessed f.header after f.close().
            titleId = f.header.titleId
            contentType = f.header.contentType
            f.close()

            os.makedirs(os.path.join(dir, titleId), exist_ok=True)
            baseName = file.split('.')[0]
            if contentType == Fs.Type.Content.META:
                dest = os.path.join(dir, titleId, baseName + '.cnmt.nca')
            else:
                dest = os.path.join(dir, titleId, baseName + '.nca')
            os.rename(path, dest)
            Print.info(dest)
        except BaseException as e:
            # best effort: a single bad file must not stop the scan
            Print.info(str(e))
def updateVersions(force=True):
    """Refresh the latest-version field of every active title.

    When *force* is False only titles without a cached version are queried.
    Also registers update titles that base titles reference but that are
    missing from the database, then saves the database.
    """
    initTitles()
    initFiles()

    for k, t in tqdm(Titles.items()):
        if (force or t.version is None) and t.isActive():
            v = t.lastestVersion(True)
            Print.info("%s[%s] v = %s" % (str(t.name), str(t.id), str(v)))

    # Register referenced-but-unknown update titles.
    # (Old code tested `t.updateId and t.updateId` — duplicate removed.)
    for t in list(Titles.data().values()):
        if not t.isUpdate and not t.isDLC and t.updateId and not Titles.contains(t.updateId):
            u = Title.Title()
            u.setId(t.updateId)
            if u.lastestVersion():
                Titles.set(t.updateId, u)
                Print.info("%s[%s] FOUND" % (str(t.name), str(u.id)))

    Titles.save()
def solidCompressNsp(filePath, compressionLevel, outputDir, threads, stusReport, id, pleaseNoPrint):
    """Compress one NSP container into a solid .nsz in *outputDir*.

    Returns the path of the written .nsz.  On failure the partial output
    file is deleted; a KeyboardInterrupt is not logged as an error.
    """
    filePath = filePath.resolve()
    container = factory(filePath)
    container.open(str(filePath), 'rb')
    nszPath = outputDir.joinpath(filePath.stem + '.nsz')
    Print.info('Solid compressing (level {0}) {1} -> {2}'.format(compressionLevel, filePath, nszPath), pleaseNoPrint)
    try:
        with Pfs0.Pfs0Stream(str(nszPath)) as nsp:
            processContainer(container, nsp, compressionLevel, threads, stusReport, id, pleaseNoPrint)
    except BaseException as ex:
        # BUG FIX: `not ex is KeyboardInterrupt` compared the exception
        # *instance* to the class and was therefore always true, so Ctrl+C
        # dumped a traceback too.  isinstance() is the correct test.
        if not isinstance(ex, KeyboardInterrupt):
            Print.error(format_exc())
        if nszPath.is_file():
            nszPath.unlink()
    container.close()
    return nszPath
def CreateTargetDict(targetFolder, parseCnmt, extension):
    """Index files with *extension* inside *targetFolder*.

    Returns (filesAtTarget, alreadyExists):
      filesAtTarget: lower-cased filename -> absolute path string
      alreadyExists: titleID -> {version: [path strings]}
    Files whose TitleID/Version cannot be determined are logged and skipped.
    """
    filesAtTarget = {}
    alreadyExists = {}
    for file in scandir(str(targetFolder)):
        try:
            filePath = Path(targetFolder).joinpath(file.name)
            filePath_str = str(filePath)
            if filePath.suffix == extension:
                Print.infoNoNewline('Extract TitleID/Version: {0} '.format(file.name))
                filesAtTarget[file.name.lower()] = filePath_str
                extractedIdVersion = ExtractTitleIDAndVersion(file, parseCnmt)
                if extractedIdVersion is None:
                    if parseCnmt:
                        # message typo fixed: "booth" -> "both"
                        Print.error('Failed to extract TitleID/Version from both filename "{0}" and Cnmt - Outdated keys.txt?'.format(Path(filePath).name))
                    else:
                        Print.error('Failed to extract TitleID/Version from filename "{0}". Use -p to extract from Cnmt.'.format(Path(filePath).name))
                    continue
                titleID, version = extractedIdVersion
                titleIDEntry = alreadyExists.get(titleID)
                if titleIDEntry is None:
                    titleIDEntry = {version: [filePath_str]}
                elif version not in titleIDEntry:
                    titleIDEntry[version] = [filePath_str]
                else:
                    titleIDEntry[version].append(filePath_str)
                alreadyExists[titleID] = titleIDEntry
                Print.info('=> {0} {1}'.format(titleID, version))
        except BaseException as e:
            # keep scanning remaining files after a bad one
            Print.info("")
            print_exc()
            Print.error('Error: ' + str(e))
    return (filesAtTarget, alreadyExists)
def setPath(self, path):
    """Set this entry's path and derive titleId, version and ticket validity.

    The titleId is taken from a [titleId] tag in the path (reTID), falling
    back to the basename (reBaseTID).  The extension decides the default
    ticket validity (.nsp/.nsz/.xci/.xcz -> valid, .nsx -> invalid).
    """
    self.path = path
    self.version = '0'

    z = reTID.match(path)
    if z:
        self.titleId = z.groups()[0].upper()
    else:
        z = reBaseTID.match(os.path.basename(path))
        if z:
            self.titleId = z.groups()[0].upper()
        else:
            Print.info('could not get title id from filename, name needs to contain [titleId] : ' + path)
            self.titleId = None

    if not hasattr(self, 'cr') or not self.cr:
        self.cr = self.getCrFromPath()

    # BUG FIX: reVER.match(path, re.I) passed re.I (== 2) as the *pos*
    # argument of Pattern.match, silently starting the match at offset 2.
    # Flags belong at compile time, not here.
    z = reVER.match(path)
    if z:
        self.version = z.groups()[0]

    if path.endswith('.nsp') or path.endswith('.nsz'):
        if self.hasValidTicket is None:
            self.setHasValidTicket(True)
    elif path.endswith('.nsx'):
        if self.hasValidTicket is None:
            self.setHasValidTicket(False)
    elif path.endswith('.xci') or path.endswith('.xcz'):
        if self.hasValidTicket is None:
            self.setHasValidTicket(True)
    else:
        print('unknown extension ' + str(path))
        return
def CreateTargetDict(targetFolder, parseCnmt, extension):
    """Index files ending in *extension* inside *targetFolder*.

    Returns (filesAtTarget, alreadyExists):
      filesAtTarget: lower-cased filename -> Path
      alreadyExists: titleID -> {version: [Paths]}
    """
    filesAtTarget = {}
    alreadyExists = {}
    for file in scandir(targetFolder):
        try:
            # BUG FIX: join with file.name, not the DirEntry itself —
            # entry.path already contains targetFolder, so joinpath(entry)
            # duplicated the directory for relative target folders.
            filePath = Path(targetFolder).joinpath(file.name)
            if file.name.endswith(extension):
                Print.infoNoNewline('Extract TitleID/Version: {0} '.format(file.name))
                filesAtTarget[file.name.lower()] = filePath
                # BUG FIX: honor the parseCnmt parameter (was hard-coded
                # True) and guard against a None result, which previously
                # raised TypeError on tuple unpacking.
                extractedIdVersion = ExtractTitleIDAndVersion(file, parseCnmt)
                if extractedIdVersion is None:
                    Print.error('Failed to extract TitleID/Version from "{0}"'.format(file.name))
                    continue
                (titleID, version) = extractedIdVersion
                titleIDEntry = alreadyExists.get(titleID)
                if titleIDEntry is None:
                    titleIDEntry = {version: [filePath]}
                elif version not in titleIDEntry:
                    titleIDEntry[version] = [filePath]
                else:
                    titleIDEntry[version].append(filePath)
                alreadyExists[titleID] = titleIDEntry
                Print.info('=> {0} {1}'.format(titleID, version))
        except BaseException as e:
            Print.info("")
            print_exc()
            Print.error('Error: ' + str(e))
    return (filesAtTarget, alreadyExists)
def __init__(self, path=None, mode='rb', cryptoType=-1, cryptoKey=-1, cryptoCounter=-1):
    """Initialize the container: reset cached metadata, then chain both
    base-class initializers (Pfs0 for the archive format, IndexedFile for
    path/title bookkeeping).

    NOTE(review): the Pfs0/IndexedFile call order looks intentional —
    IndexedFile presumably resolves path/titleId used below; confirm
    before reordering.
    """
    # cached metadata, populated later by the base classes / setters
    self.path = None
    self.titleId = None
    self.hasValidTicket = None
    self.timestamp = None
    self.version = None
    self.fileSize = None
    self.fileModified = None
    self.extractedNcaMeta = False
    self.verified = None
    self.attributes = {}
    Pfs0.__init__(self, None, path, mode, cryptoType, cryptoKey, cryptoCounter)
    IndexedFile.__init__(self, path, mode, cryptoType, cryptoKey, cryptoCounter)
    # announce titles whose ticket can be unlocked, once an id is known
    if self.titleId and self.isUnlockable():
        Print.info('unlockable title found ' + self.path)
def daemon():
    """USB daemon loop: wait for a Switch, then service its commands forever.

    The module-global `status` reflects the connection state
    ('disconnected'/'connected') for observers.  Any failure logs, sleeps
    one second and retries.
    """
    global status
    Watcher.start()
    while True:
        try:
            status = 'disconnected'
            # presumably blocks until a matching USB device appears — confirm
            dev = getDevice()
            Print.info('USB Connected')
            status = 'connected'
            dev.reset()
            dev.set_configuration()
            cfg = dev.get_active_configuration()

            # endpoint-direction predicates for usb.util.find_descriptor
            def is_out_ep(ep):
                return usb.util.endpoint_direction(ep.bEndpointAddress) == usb.util.ENDPOINT_OUT

            def is_in_ep(ep):
                return usb.util.endpoint_direction(ep.bEndpointAddress) == usb.util.ENDPOINT_IN

            out_ep = usb.util.find_descriptor(cfg[(0, 0)], custom_match=is_out_ep)
            in_ep = usb.util.find_descriptor(cfg[(0, 0)], custom_match=is_in_ep)

            assert out_ep is not None
            assert in_ep is not None

            # blocks for the lifetime of the connection
            poll_commands(in_ep, out_ep)
        except BaseException as e:
            Print.error('usb exception: ' + str(e))
        time.sleep(1)
def importRegion(region='US', language='en', save=True):
    """Merge every regional title database into the main one, preferring
    entries from *region*/*language*.

    Returns False when the requested region/language pair is unknown;
    otherwise reloads the txt databases and optionally saves.
    """
    available = Config.regionLanguages()
    if region not in available or language not in available[region]:
        Print.info('Could not locate %s/%s !' % (region, language))
        return False

    pairs = [
        RegionLanguage(r, l, region, language)
        for r in available
        for l in available[r]
    ]

    for rl in sorted(pairs):
        regionData = Titles.data(rl.region, rl.language)
        for nsuId in sorted(regionData.keys(), reverse=True):
            entry = regionData[nsuId]
            if not entry.id:
                continue
            Titles.get(entry.id, None, None).importFrom(
                entry, rl.region, rl.language,
                preferredRegion=region, preferredLanguage=language)

    Titles.loadTxtDatabases()
    if save:
        Titles.save()
def getInstall(request, response):
    """Ask the user's Switch to pull title.nsp for the requested title id.

    Sends a length-prefixed download URL to the console and writes a JSON
    success/failure object to *response*.
    """
    try:
        url = ('%s:%s@%s:%d/api/download/%s/title.nsp' % (request.user.id, request.user.password, Config.server.hostname, Config.server.port, request.bits[2]))
        Print.info('Installing ' + str(request.bits[2]))
        payload = url.encode('ascii')

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((request.user.switchHost, request.user.switchPort))
        # frame: 4-byte big-endian length followed by the URL bytes
        sock.sendall(struct.pack('!L', len(payload)) + payload)
        # spin until the console sends an ack byte
        # NOTE(review): loops forever if the peer closes without sending — confirm
        while len(sock.recv(1)) < 1:
            time.sleep(0.05)
        sock.close()

        response.write(json.dumps({'success': True, 'message': 'install successful'}))
    except BaseException as e:
        response.write(json.dumps({'success': False, 'message': str(e)}))
def load():
    """Load titledb/titles.json into the module-global `titles` dict.

    Skips the forbidden id 0100000000000816.  Returns False when the json
    file is corrupt.  The lock is now released via try/finally, so an
    unexpected exception can no longer leave `confLock` held forever.
    """
    global titles
    confLock.acquire()
    try:
        titles = {}
        if os.path.isfile("titledb/titles.json"):
            timestamp = time.perf_counter()
            with open('titledb/titles.json', encoding="utf-8-sig") as f:
                try:
                    # json.JSONDecodeError is a ValueError subclass;
                    # the old bare `except:` also swallowed KeyboardInterrupt
                    items = json.loads(f.read()).items()
                except ValueError:
                    Print.error('json file is corrupted: titledb/titles.json')
                    return False
            for i, k in items:
                if i != '0100000000000816':
                    titles[i] = Title.Title()
                    titles[i].__dict__ = k
                    titles[i].setId(i)
            Print.info('loaded titledb/titles.json in ' + str(time.perf_counter() - timestamp) + ' seconds')
    finally:
        confLock.release()
def open(self, path=None, mode='rb', cryptoType=-1, cryptoKey=-1, cryptoCounter=-1):
    """Open this filesystem section, then attach BKTR relocation /
    subsection tables when their raw buffers are present.

    Table construction is best effort: a broken BKTR buffer is logged and
    ignored rather than failing the open.  (The superclass return value
    was previously bound to an unused local `r`; it is now discarded.)
    """
    super(BaseFs, self).open(path, mode, cryptoType, cryptoKey, cryptoCounter)

    if self.bktr1Buffer:
        try:
            self.bktrRelocation = Bktr.Bktr1(MemoryFile(self.bktr1Buffer), 'rb', nca=self)
        except BaseException as e:
            Print.info('bktr reloc exception: ' + str(e))

    if self.bktr2Buffer:
        try:
            self.bktrSubsection = Bktr.Bktr2(MemoryFile(self.bktr2Buffer), 'rb', nca=self)
        except BaseException as e:
            Print.info('bktr subsection exception: ' + str(e))
def downloadFile(url, fPath):
    """Download *url* to *fPath*, resuming a partial file when possible.

    When fPath already exists a Range request is sent; if the existing
    file turns out larger than the remote size the download restarts from
    scratch.  Returns fPath.
    """
    # display name: first whitespace-separated token of the basename
    fName = os.path.basename(fPath).split()[0]
    if os.path.exists(fPath):
        dlded = os.path.getsize(fPath)
        r = makeRequest('GET', url, hdArgs={'Range': 'bytes=%s-' % dlded})
        # NOTE(review): this exact server string presumably identifies the
        # CDN that honors Range requests; anything else is treated as
        # already complete — confirm.
        if r.headers.get('Server') != 'openresty/1.9.7.4':
            Print.info('Download is already complete, skipping!')
            return fPath
        elif r.headers.get('Content-Range') is None:  # CDN doesn't return a range if request >= filesize
            fSize = int(r.headers.get('Content-Length'))
        else:
            fSize = dlded + int(r.headers.get('Content-Length'))
        if dlded == fSize:
            Print.info('Download is already complete, skipping!')
            return fPath
        elif dlded < fSize:
            Print.info('Resuming download...')
            f = open(fPath, 'ab')
        else:
            Print.error('Existing file is bigger than expected (%s/%s), restarting download...' % (dlded, fSize))
            dlded = 0
            f = open(fPath, "wb")
    else:
        dlded = 0
        r = makeRequest('GET', url)
        fSize = int(r.headers.get('Content-Length'))
        f = open(fPath, 'wb')
    chunkSize = 0x100000
    # progress bar only for files over ~10 KB; small files are written whole
    if fSize >= 10000:
        s = Status.create(fSize, desc=fName, unit='B')
        #s.id = titleId.upper()
        s.add(dlded)
        for chunk in r.iter_content(chunkSize):
            f.write(chunk)
            s.add(len(chunk))
            dlded += len(chunk)
            if not Config.isRunning:
                break
        s.close()
    else:
        f.write(r.content)
        dlded += len(r.content)
    # if fSize != 0 and dlded != fSize:
    #     raise ValueError('Downloaded data is not as big as expected (%s/%s)!' % (dlded, fSize))
    f.close()
    Print.debug('\r\nSaved to %s!' % f.name)
    return fPath
def AllowedToWriteOutfile(filePath, targetFileExtension, targetDict, removeOld, overwrite, parseCnmt):
    """Decide whether *filePath* may be written to the output directory.

    With removeOld, deletes strictly-older versions of the same title and
    refuses when a newer one exists.  Refuses duplicates of the same
    title/version unless overwrite is set (which deletes them instead).
    Finally defers to fileNameCheck for plain filename collisions.
    """
    (filesAtTarget, alreadyExists) = targetDict
    extractedIdVersion = ExtractTitleIDAndVersion(filePath, parseCnmt)
    if extractedIdVersion is None:
        Print.error("Failed to extract TitleID/Version from filename {0}. Use -p to extract from Cnmt.".format(Path(filePath).name))
        return fileNameCheck(filePath, targetFileExtension, filesAtTarget, removeOld, overwrite)
    (titleIDExtracted, versionExtracted) = extractedIdVersion
    titleIDEntry = alreadyExists.get(titleIDExtracted)

    if removeOld and titleIDEntry is not None:
        exitFlag = False
        for versionEntry in titleIDEntry.keys():
            if versionEntry < versionExtracted:
                for delFilePath in titleIDEntry[versionEntry]:
                    Print.info('Delete outdated version: {0}'.format(delFilePath))
                    remove(delFilePath)
                    del filesAtTarget[Path(delFilePath).name.lower()]
            else:
                exitFlag = True
        if exitFlag:
            Print.info('{0} with the same ID and newer version already exists in the output directory.\n'
                       'If you want to process it do not use --rm-old-version!'.format(Path(filePath).name))
            return False

    if titleIDEntry is not None:
        for versionEntry in titleIDEntry:
            # BUG FIX: previously compared `versionEntry == titleIDEntry`
            # (a version against the whole dict) so this branch never ran,
            # and the overwrite path iterated filesAtTarget (a dict) with a
            # 2-tuple unpack and called the nonexistent dict.remove().
            if versionEntry == versionExtracted:
                if overwrite:
                    for dupPath in titleIDEntry[versionEntry]:
                        Print.info('Delete duplicate: {0}'.format(dupPath))
                        del filesAtTarget[Path(dupPath).name.lower()]
                        remove(dupPath)
                else:
                    Print.info('{0} with the same ID and version already exists in the output directory.\n'
                               'If you want to overwrite it use the -w parameter!'.format(Path(filePath).name))
                    return False

    return fileNameCheck(filePath, targetFileExtension, filesAtTarget, removeOld, overwrite)
def move(self, forceNsp=False):
    """Move this file to its canonical computed filename.

    Returns False when there is nothing to do (no path, no computable
    name, already in place, or a duplicate exists), True after an
    attempted move.  Honors Config.dryRun.
    """
    if not self.path:
        Print.error('no path set')
        return False

    # fileName() was previously recomputed up to six times; compute once
    newPath = self.fileName(forceNsp=forceNsp)
    if not newPath:
        Print.error('could not get filename for ' + self.path)
        return False

    def _norm(p):
        # case/separator-insensitive comparison for Windows paths
        return os.path.abspath(p).lower().replace('\\', '/')

    if _norm(newPath) == _norm(self.path):
        return False

    if os.path.isfile(newPath):
        Print.info('duplicate title: ')
        Print.info(os.path.abspath(self.path))
        Print.info(os.path.abspath(newPath))
        return False

    try:
        Print.info(self.path + ' -> ' + newPath)
        if not Config.dryRun:
            os.makedirs(os.path.dirname(newPath), exist_ok=True)
            shutil.move(self.path, newPath)
            self.path = newPath
    except BaseException as e:
        Print.error('failed to rename file! %s -> %s : %s' % (self.path, newPath, e))
        if not Config.dryRun:
            self.moveDupe()

    return True
def scrapeShogunThreaded(force=False):
    """Scrape shogun metadata for every CDN region with a pool of 8 worker
    threads, then save all title databases."""
    if not hasCdn:
        return
    initTitles()
    initFiles()

    workQueue = queue.LifoQueue() if Config.reverse else queue.Queue()
    for region in cdn.regions():
        workQueue.put(region)

    workerCount = 8
    workers = []
    for _ in range(workerCount):
        worker = threading.Thread(target=scrapeShogunWorker, args=[workQueue, force])
        worker.daemon = True
        worker.start()
        workers.append(worker)

    Print.info('joining shogun queue')
    workQueue.join()

    # one sentinel per worker tells it to exit
    for _ in range(workerCount):
        workQueue.put(None)

    for n, worker in enumerate(workers, start=1):
        worker.join()
        Print.info('joined thread %d of %d' % (n, len(workers)))

    Print.info('saving titles')
    Titles.saveAll()
    Print.info('titles saved')
def removeTitleRights(self):
    """Strip title-rights crypto from every NCA in this container.

    Decrypts the ticket's title key, verifies all NCAs share the ticket's
    master key revision, clears the rights id, and re-encrypts the title
    key into each NCA's key area using standard crypto.

    Raises IOError when no title key is in the database or the key
    generations mismatch.
    """
    if not Titles.contains(self.titleId):
        raise IOError('No title key found in database! ' + self.titleId)

    ticket = self.ticket()
    masterKeyRev = ticket.getMasterKeyRevision()
    titleKeyDec = Keys.decryptTitleKey(ticket.getTitleKeyBlock().to_bytes(16, byteorder='big'), Keys.getMasterKeyIndex(masterKeyRev))
    rightsId = ticket.getRightsId()

    Print.info('rightsId =\t' + hex(rightsId))
    Print.info('titleKeyDec =\t' + str(hx(titleKeyDec)))
    Print.info('masterKeyRev =\t' + hex(masterKeyRev))

    # sanity check: every NCA must agree on the master key revision
    # (a dead `pass` statement before the raise was removed)
    for nca in self:
        if type(nca) == Nca:
            if nca.header.getCryptoType2() != masterKeyRev:
                raise IOError('Mismatched masterKeyRevs!')

    ticket.setRightsId(0)

    for nca in self:
        if type(nca) == Nca:
            if nca.header.getRightsId() == 0:
                continue
            Print.info('writing masterKeyRev for %s, %d' % (str(nca._path), masterKeyRev))
            crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), nca.header.keyIndex))
            # key area holds four 0x10 slots; fill all four with the title key
            encKeyBlock = crypto.encrypt(titleKeyDec * 4)
            nca.header.setRightsId(0)
            nca.header.setKeyBlock(encKeyBlock)
            Hex.dump(encKeyBlock)
def scanLatestTitleUpdates():
    """Record today's CDN version list into the version history.

    Loads the existing titledb/versions.json (best effort), merges the
    CDN's current version for every title, flags newly-seen versions, and
    writes versions.json back out.
    """
    global versionHistory
    initTitles()
    initFiles()

    today = datetime.datetime.now().strftime("%Y-%m-%d")

    # seed the in-memory history from disk; missing/corrupt file is fine
    try:
        with open('titledb/versions.json', 'r') as f:
            for titleId, vers in json.loads(f.read()).items():
                for ver, date in vers.items():
                    setVersionHistory(titleId, ver, date)
    except BaseException:
        pass

    if not hasCdn:
        return

    for rawId, rawVersion in cdn.hacVersionList().items():
        currentId = str(rawId).upper()
        currentVersion = str(rawVersion)

        if not Titles.contains(currentId):
            if len(currentId) != 16:
                Print.info('invalid title id: ' + currentId)
                continue

        t = Titles.get(currentId)
        if t.isUpdate:
            setVersionHistory(Title.getBaseId(currentId), currentVersion, today)
        else:
            setVersionHistory(currentId, currentVersion, today)

        if str(t.version) != str(currentVersion):
            Print.info('new version detected for %s[%s] v%s' % (t.name or '', t.id or ('0' * 16), str(currentVersion)))
            t.setVersion(currentVersion, True)

    Titles.save()

    try:
        with open('titledb/versions.json', 'w') as outfile:
            json.dump(versionHistory, outfile, indent=4, sort_keys=True)
    except BaseException as e:
        Print.info(str(e))
def submitKeys():
    """Verify the stored title key of every title that has local files.

    Titles whose key fails blockchain verification get their files marked
    as having an invalid ticket and moved.  Lookup/OS errors are logged
    and skipped; anything else is logged and re-raised.
    """
    for titleId, title in Titles.items():
        if not (title.key and len(title.getFiles()) > 0):
            continue
        try:
            #blockchain.blockchain.suggest(t.id, t.key)
            if not blockchain.verifyKey(title.id, title.key):
                Print.error('Key verification failed for %s / %s' % (str(title.id), str(title.key)))
                for f in title.getFiles():
                    f.hasValidTicket = False
                    f.move()
        except LookupError as e:
            Print.info(str(e))
        except OSError as e:
            Print.info(str(e))
        except BaseException as e:
            Print.info(str(e))
            raise
def updateDb(url, c=0):
    """Download a remote title database from *url* and load it.

    Non-http(s) inputs are treated as base64-encoded URLs.  Follows the
    "Proceed" interstitial page at most 3 times (via *c*).  Returns False
    once the retry budget is exhausted.
    """
    nut.initTitles()
    c += 1
    if c > 3:
        return False
    Print.info("Downloading new title database " + url)
    try:
        if not url:
            return
        if "http://" not in url and "https://" not in url:
            try:
                # BUG FIX: b64decode returns bytes; requests needs a str URL
                url = base64.b64decode(url).decode('utf-8')
            except Exception as e:
                Print.info("\nError decoding url: ", e)
                return
        r = requests.get(url)
        r.encoding = 'utf-8-sig'
        if r.status_code == 200:
            try:
                # some hosts serve an interstitial page; follow its link
                m = re.search(r'<a href="([^"]*)">Proceed</a>', r.text)
                if m:
                    return updateDb(m.group(1), c)
            except Exception:
                pass
            Titles.loadTitleBuffer(r.text, False)
        else:
            Print.info('Error updating database: ', repr(r))
    except Exception as e:
        Print.info('Error downloading:' + str(e))
        raise
def getVerifiedHeader(self):
    """Return a repaired 0x200-byte NCA header whose signature verifies,
    or None when no repair attempt succeeds.

    Repair attempts, in order: the header as stored; flipping the
    isGameCard byte; then (only when the NCA has no title rights)
    rewriting the rights-id or the standard-crypto key area for every
    known key generation, each combined with both isGameCard values.
    """
    self.seek(0x200)
    buffer = bytearray(self.read(0x200))

    if self.verifyBuffer(buffer):
        return buffer

    # byte 0x04 is the isGameCard flag; a flipped value is a common defect
    for gameCardValue in [0, 1]:
        buffer[0x04] = gameCardValue
        if self.verifyBuffer(buffer):
            Print.info('isGameCard = %d' % gameCardValue)
            return buffer

    if self.hasTitleRights():
        return None

    # NOTE(review): `title` is never used below — dead code? confirm.
    title = Titles.get(self.titleId)
    '''
	if title.rightsId:
		for gameCardValue in [0, 1]:
			buffer[0x04] = gameCardValue
			#return False
	'''
    for gameCardValue in [0, 1]:
        buffer[0x04] = gameCardValue
        for keyGen in Keys.getKeyGens():
            buffer = self.setRightsIdBuffer(buffer, keyGen)
            if self.verifyBuffer(buffer):
                Print.info('Title Rights: isGameCard = %d, keyGen = %d' % (gameCardValue, keyGen))
                return buffer

    for gameCardValue in [0, 1]:
        buffer[0x04] = gameCardValue
        for keyGen in Keys.getKeyGens():
            buffer = self.setStandardCryptoBuffer(buffer, keyGen)
            if self.verifyBuffer(buffer):
                Print.info('Standard Crypto: isGameCard = %d, keyGen = %d' % (gameCardValue, keyGen))
                return buffer

    return None
def recv(self, timeout=60000):
    """Read one framed packet from the USB in-endpoint into self.

    Header layout (32 bytes, little endian): magic(4) command(4) size(8)
    threadId(4) packetIndex(2) packetCount(2) timestamp(8), followed by
    `size` payload bytes.  Returns False on bad magic, True on success.
    """
    Print.info('begin recv')
    header = bytes(self.i.read(32, timeout=timeout))
    Print.info('read complete')

    magic = header[:4]
    le = 'little'
    self.command = int.from_bytes(header[4:8], byteorder=le)
    self.size = int.from_bytes(header[8:16], byteorder=le)
    self.threadId = int.from_bytes(header[16:20], byteorder=le)
    self.packetIndex = int.from_bytes(header[20:22], byteorder=le)
    self.packetCount = int.from_bytes(header[22:24], byteorder=le)
    self.timestamp = int.from_bytes(header[24:32], byteorder=le)

    if magic != b'\x12\x12\x12\x12':
        Print.error('invalid magic! ' + str(magic))
        return False

    Print.info('receiving %d bytes' % self.size)
    self.payload = bytes(self.i.read(self.size, timeout=0))
    return True
def downloadAll(wait=True):
    """Queue every active title with an available update, start the
    download threads, and (when *wait*) block until the queue drains.

    KeyboardInterrupt exits quietly; other errors are logged.  The global
    `status` progress bar is always closed on the way out.
    """
    initTitles()
    initFiles()

    global activeDownloads
    global status

    scanned = 0
    Print.info('Downloading All')
    try:
        for k, t in Titles.items():
            scanned += 1
            if not t.isActive():
                continue
            if t.isUpdateAvailable():
                # lastestVersion() was previously re-queried three times and
                # tested with the odd `in [None]` idiom
                latest = t.lastestVersion()
                if not t.id or t.id == '0' * 16 or (t.isUpdate and latest is None):
                    Print.warning('no valid id? id: %s version: %s' % (str(t.id), str(latest)))
                    continue
                if latest is None:
                    Print.info('Could not get version for ' + str(t.name) + ' [' + str(t.id) + ']')
                    continue
                Titles.queue.add(t.id)

        Print.info("%d titles scanned, downloading %d" % (scanned, Titles.queue.size()))
        Titles.save()
        status = Status.create(Titles.queue.size(), 'Total Download')
        startDownloadThreads()
        while wait and (not Titles.queue.empty() or sum(activeDownloads) > 0):
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    except BaseException as e:
        Print.error(str(e))

    if status:
        status.close()
def solidCompressTask(in_queue, statusReport, readyForWork, pleaseNoPrint, pleaseKillYourself, id):
    """Worker loop: pull compression jobs from *in_queue* until told to die.

    Each job is (filePath, compressionLevel, outputDir, threads, verify).
    KeyboardInterrupt is swallowed per-job; other errors are logged and
    re-raised.
    """
    while True:
        readyForWork.increment()
        job = in_queue.get()
        readyForWork.decrement()
        if pleaseKillYourself.value() > 0:
            break
        try:
            srcPath, level, outDir, threadCount, doVerify = job
            result = solidCompress(srcPath, level, outDir, threadCount, statusReport, id, pleaseNoPrint)
            if doVerify:
                Print.info("[VERIFY NSZ] {0}".format(result))
                verify(result, True, [statusReport, id], pleaseNoPrint)
        except KeyboardInterrupt:
            Print.info('Keyboard exception')
        except BaseException as e:
            Print.info('nut exception: {0}'.format(str(e)))
            raise
def pack(self, files):
    """Repack *files* into an NSP at self.path.

    Skips the write when an output of exactly the expected size already
    exists.  The output handle is now a context manager, so it can no
    longer leak if an append fails mid-way.
    """
    if not self.path:
        return False

    Print.info('\tRepacking to NSP...')

    hd = self.generateHeader(files)
    totalSize = len(hd) + sum(os.path.getsize(file) for file in files)
    if os.path.exists(self.path) and os.path.getsize(self.path) == totalSize:
        Print.info('\t\tRepack %s is already complete!' % self.path)
        return

    t = tqdm(total=totalSize, unit='B', unit_scale=True, desc=os.path.basename(self.path), leave=False)
    t.write('\t\tWriting header...')
    with open(self.path, 'wb') as outf:
        outf.write(hd)
        t.update(len(hd))

        for file in files:
            t.write('\t\tAppending %s...' % os.path.basename(file))
            with open(file, 'rb') as inf:
                while True:
                    buf = inf.read(4096)
                    if not buf:
                        break
                    outf.write(buf)
                    t.update(len(buf))

        t.close()
        Print.info('\t\tRepacked to %s!' % outf.name)
def decompress(filePath, outputDir, statusReportInfo=None):
    """Decompress an .nsz/.xcz container or a single .ncz to *outputDir*.

    For NCZ files the decompressed hash is checked against the hash
    embedded in the filename.  Raises NotImplementedError for unsupported
    formats.
    """
    if isNspNsz(filePath):
        return __decompressNsz(filePath, outputDir, True, False, statusReportInfo)
    elif isCompressedGameFile(filePath):
        filename = changeExtension(filePath, '.nca')
        outPath = filename if outputDir is None else str(Path(outputDir).joinpath(filename))
        Print.info('Decompressing %s -> %s' % (filePath, outPath))
        if Config.dryRun:
            return None
        container = factory(filePath)
        container.open(filePath, 'rb')
        try:
            with open(outPath, 'wb') as outFile:
                written, hexHash = __decompressNcz(container, outFile)
        except BaseException as ex:
            # BUG FIXES: `ex is not KeyboardInterrupt` compared an instance
            # to the class (always true); cleanup called .is_file()/.unlink()
            # on the file object instead of a Path; and execution previously
            # fell through to an unbound `hexHash` (NameError) — re-raise
            # after cleanup instead.
            if not isinstance(ex, KeyboardInterrupt):
                Print.error(format_exc())
            partial = Path(outPath)
            if partial.is_file():
                partial.unlink()
            raise
        finally:
            container.close()
        fileNameHash = Path(filePath).stem.lower()
        if hexHash[:32] == fileNameHash:
            Print.info('[VERIFIED] {0}'.format(filename))
        else:
            Print.info('[MISMATCH] Filename startes with {0} but {1} was expected - hash verified failed!'.format(fileNameHash, hexHash[:32]))
    else:
        raise NotImplementedError("Can't decompress {0} as that file format isn't implemented!".format(filePath))
def printInfo(self, maxDepth=3, indent=0):
    """Print an HFS0 banner at the current *indent*, then delegate to the
    generic Pfs0 dump."""
    pad = '\t' * indent
    Print.info('\n%sHFS0\n' % (pad))
    super(Pfs0, self).printInfo(maxDepth, indent)
def printInfo(self, maxDepth=3, indent=0):
    """Dump the NCA header fields, key blocks and (depth permitting) the
    contained partitions, indented by *indent* tab stops."""
    tabs = '\t' * indent
    hdr = self.header
    Print.info('\n%sNCA Archive\n' % (tabs))
    super(Nca, self).printInfo(maxDepth, indent)

    Print.info(tabs + 'magic = ' + str(hdr.magic))
    Print.info(tabs + 'titleId = ' + str(hdr.titleId))
    Print.info(tabs + 'rightsId = ' + str(hdr.rightsId))
    Print.info(tabs + 'isGameCard = ' + hex(hdr.isGameCard))
    Print.info(tabs + 'contentType = ' + str(hdr.contentType))
    Print.info(tabs + 'cryptoType = ' + str(self.cryptoType))
    Print.info(tabs + 'Size: ' + str(hdr.size))
    Print.info(tabs + 'crypto master key: ' + str(hdr.cryptoType))
    Print.info(tabs + 'crypto master key2: ' + str(hdr.cryptoType2))
    Print.info(tabs + 'key Index: ' + str(hdr.keyIndex))
    #Print.info(tabs + 'key Block: ' + str(self.header.getKeyBlock()))

    for key in hdr.keys:
        if key:
            Print.info(tabs + 'key Block: ' + str(hx(key)))

    if indent + 1 < maxDepth:
        Print.info('\n%sPartitions:' % (tabs))
        for s in self:
            s.printInfo(maxDepth, indent + 1)

    if hdr.contentType == Fs.Type.Content.PROGRAM:
        Print.info(tabs + 'build Id: ' + str(self.buildId()))
def open(self, file=None, mode='rb', cryptoType=-1, cryptoKey=-1, cryptoCounter=-1):
    """Parse the NCA header from the (already decrypted) stream.

    Reads signatures and identity fields, validates the magic, reads the
    section tables, derives the master key index, unwraps the key area,
    and resolves the decrypted title key (from the ticket database when
    the NCA uses title rights).  Raises on a bad magic, which usually
    means header decryption failed (wrong keys).
    """
    super(NcaHeader, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)
    self.rewind()
    self.signature1 = self.read(0x100)
    self.signature2 = self.read(0x100)
    self.magic = self.read(0x4)
    self.isGameCard = self.readInt8()
    self.contentType = self.readInt8()
    try:
        self.contentType = Fs.Type.Content(self.contentType)
    except:
        pass  # unknown content type: keep the raw int
    self.cryptoType = self.readInt8()
    self.keyIndex = self.readInt8()
    self.size = self.readInt64()
    # titleId is stored little-endian; reverse before hex-encoding
    self.titleId = hx(self.read(8)[::-1]).decode('utf-8').upper()
    self.contentIndex = self.readInt32()
    self.sdkVersion = self.readInt32()
    self.cryptoType2 = self.readInt8()
    self.read(0xF)  # padding
    self.rightsId = hx(self.read(0x10))

    if self.magic not in [b'NCA3', b'NCA2']:
        raise Exception('Failed to decrypt NCA header: ' + str(self.magic))

    self.sectionHashes = []

    for i in range(4):
        self.sectionTables.append(SectionTableEntry(self.read(0x10)))

    for i in range(4):
        # NOTE(review): this copies the section TABLE entries instead of
        # reading the section hashes from the file — looks suspicious;
        # confirm against the NCA format before changing.
        self.sectionHashes.append(self.sectionTables[i])

    # master key index: max of the two crypto-type fields minus one, floored at 0
    self.masterKey = (self.cryptoType if self.cryptoType > self.cryptoType2 else self.cryptoType2) - 1
    if self.masterKey < 0:
        self.masterKey = 0

    self.encKeyBlock = self.getKeyBlock()
    #for i in range(4):
    #	offset = i * 0x10
    #	key = encKeyBlock[offset:offset+0x10]
    #	Print.info('enc %d: %s' % (i, hx(key)))
    #crypto = aes128.AESECB(Keys.keyAreaKey(self.masterKey, 0))
    self.keyBlock = Keys.unwrapAesWrappedTitlekey(self.encKeyBlock, self.masterKey)
    self.keys = []
    # key area: four 0x10-byte key slots
    for i in range(4):
        offset = i * 0x10
        key = self.keyBlock[offset:offset + 0x10]
        #Print.info('dec %d: %s' % (i, hx(key)))
        self.keys.append(key)

    if self.hasTitleRights():
        # rights id embeds the owning titleId in its first 16 hex chars
        titleRightsTitleId = self.rightsId.decode()[0:16].upper()
        if titleRightsTitleId in Titles.keys() and Titles.get(titleRightsTitleId).key:
            self.titleKeyDec = Keys.decryptTitleKey(uhx(Titles.get(titleRightsTitleId).key), self.masterKey)
        else:
            Print.info('could not find title key!')
    else:
        self.titleKeyDec = self.key()

    return True
def blockCompress(filePath, compressionLevel=18, blockSizeExponent=20, threads=32, outputDir=None, overwrite=False, filesAtTarget=[]):
    """Block-compress an NSP into an NSZ using a pool of worker processes.

    Each PROGRAM/PUBLICDATA NCA is rewritten as an NCZ: the first 0x4000
    header bytes are copied verbatim, followed by an NCZSECTN section
    table, an NCZBLOCK block-size table, and the independently compressed
    blocks of 2**blockSizeExponent bytes.  Everything else (tickets, meta,
    delta-skipped content) is copied through unchanged.

    NOTE(review): `filesAtTarget=[]` is a mutable default argument, and
    `overwrite`/`filesAtTarget`/`titleId`/`nszFilename` appear unused in
    this body — presumably leftovers or used by an earlier revision;
    confirm before removing.

    Returns the path of the written .nsz file.
    """
    ncaHeaderSize = 0x4000
    if blockSizeExponent < 14 or blockSizeExponent > 32:
        raise ValueError("Block size must be between 14 and 32")
    blockSize = 2**blockSizeExponent
    filePath = os.path.abspath(filePath)
    container = Fs.factory(filePath)
    container.open(filePath, 'rb')
    CHUNK_SZ = 0x100000
    if outputDir is None:
        nszPath = filePath[0:-1] + 'z'
    else:
        nszPath = os.path.join(outputDir, os.path.basename(filePath[0:-1] + 'z'))
    nszPath = os.path.abspath(nszPath)
    nszFilename = os.path.basename(nszPath)
    # Getting title ID to check for NSZ file in the output directory
    # We should still keep this part of title ID comparison because not all
    # files have titleID in filename.
    titleId = ''
    for nspf in container:
        if isinstance(nspf, Fs.Ticket.Ticket):
            nspf.getRightsId()
            titleId = nspf.titleId()
            break  # No need to go for other objects
    Print.info('compressing (level %d) %s -> %s' % (compressionLevel, filePath, nszPath))
    newNsp = Fs.Pfs0.Pfs0Stream(nszPath)
    try:
        manager = Manager()
        results = manager.list()
        readyForWork = ThreadSafeCounter.Counter(0)
        pleaseKillYourself = ThreadSafeCounter.Counter(0)
        # 200 MiB worth of blocks are dispatched per "chunk" before results
        # are reaped back in order
        TasksPerChunk = 209715200 // blockSize
        for i in range(TasksPerChunk):
            results.append(b"")
        work = manager.Queue(threads)
        pool = []
        for i in range(threads):
            p = Process(target=compressBlockTask, args=(work, results, blockSize, readyForWork, pleaseKillYourself))
            p.start()
            pool.append(p)
        for nspf in container:
            if isinstance(nspf, Fs.Nca.Nca) and nspf.header.contentType == Fs.Type.Content.DATA:
                Print.info('skipping delta fragment')
                continue
            if isinstance(nspf, Fs.Nca.Nca) and (nspf.header.contentType == Fs.Type.Content.PROGRAM or nspf.header.contentType == Fs.Type.Content.PUBLICDATA):
                if SectionFs.isNcaPacked(nspf, ncaHeaderSize):
                    newFileName = nspf._path[0:-1] + 'z'
                    f = newNsp.add(newFileName, nspf.size)
                    start = f.tell()
                    nspf.seek(0)
                    # the plaintext NCA header is stored uncompressed
                    f.write(nspf.read(ncaHeaderSize))
                    sections = []
                    for fs in SectionFs.sortedFs(nspf):
                        sections += fs.getEncryptionSections()
                    if len(sections) == 0:
                        raise Exception("NCA can't be decrypted. Outdated keys.txt?")
                    # NCZSECTN table: offset/size/cryptoType/key/counter per section
                    header = b'NCZSECTN'
                    header += len(sections).to_bytes(8, 'little')
                    i = 0
                    for fs in sections:
                        i += 1
                        header += fs.offset.to_bytes(8, 'little')
                        header += fs.size.to_bytes(8, 'little')
                        header += fs.cryptoType.to_bytes(8, 'little')
                        header += b'\x00' * 8
                        header += fs.cryptoKey
                        header += fs.cryptoCounter
                    f.write(header)
                    blockID = 0
                    chunkRelativeBlockID = 0
                    startChunkBlockID = 0
                    blocksHeaderFilePos = f.tell()
                    compressedblockSizeList = []
                    bytesToCompress = nspf.size - 0x4000
                    blocksToCompress = bytesToCompress // blockSize + (bytesToCompress % blockSize > 0)
                    # NCZBLOCK table; per-block sizes are back-patched later
                    header = b'NCZBLOCK'  #Magic
                    header += b'\x02'  #Version
                    header += b'\x01'  #Type
                    header += b'\x00'  #Unused
                    header += blockSizeExponent.to_bytes(1, 'little')  #blockSizeExponent in bits: 2^x
                    header += blocksToCompress.to_bytes(4, 'little')  #Amount of Blocks
                    header += bytesToCompress.to_bytes(8, 'little')  #Decompressed Size
                    header += b'\x00' * (blocksToCompress * 4)
                    f.write(header)
                    compressedblockSizeList = [0] * blocksToCompress
                    decompressedBytes = ncaHeaderSize
                    with tqdm(total=nspf.size, unit_scale=True, unit="B/s") as bar:
                        partitions = []
                        for section in sections:
                            #print('offset: %x\t\tsize: %x\t\ttype: %d\t\tiv%s' % (section.offset, section.size, section.cryptoType, str(hx(section.cryptoCounter))))
                            partitions.append(nspf.partition(offset=section.offset, size=section.size, n=None, cryptoType=section.cryptoType, cryptoKey=section.cryptoKey, cryptoCounter=bytearray(section.cryptoCounter), autoOpen=True))
                        partNr = 0
                        bar.update(f.tell())
                        while True:
                            buffer = partitions[partNr].read(blockSize)
                            # a block may span partition boundaries
                            while (len(buffer) < blockSize and partNr < len(partitions) - 1):
                                partNr += 1
                                buffer += partitions[partNr].read(blockSize - len(buffer))
                            if chunkRelativeBlockID >= TasksPerChunk or len(buffer) == 0:
                                # chunk full (or EOF): wait for the workers,
                                # then flush compressed blocks in order
                                while readyForWork.value() < threads:
                                    sleep(0.02)
                                for i in range(min(TasksPerChunk, blocksToCompress - startChunkBlockID)):
                                    compressedblockSizeList[startChunkBlockID + i] = len(results[i])
                                    f.write(results[i])
                                    results[i] = b""
                                if len(buffer) == 0:
                                    # EOF: send one sentinel per idle worker
                                    # and wait for them all to exit
                                    pleaseKillYourself.increment()
                                    for i in range(readyForWork.value()):
                                        work.put(None)
                                    while readyForWork.value() > 0:
                                        sleep(0.02)
                                    break
                                chunkRelativeBlockID = 0
                                startChunkBlockID = blockID
                            work.put([buffer, compressionLevel, compressedblockSizeList, chunkRelativeBlockID])
                            blockID += 1
                            chunkRelativeBlockID += 1
                            decompressedBytes += len(buffer)
                            bar.update(len(buffer))
                    # back-patch the per-block compressed sizes into NCZBLOCK
                    f.seek(blocksHeaderFilePos + 24)
                    header = b""
                    for compressedblockSize in compressedblockSizeList:
                        header += compressedblockSize.to_bytes(4, 'little')
                    f.write(header)
                    f.seek(0, 2)  #Seek to end of file.
                    written = f.tell() - start
                    print('compressed %d%% %d -> %d - %s' % (int(written * 100 / nspf.size), decompressedBytes, written, nspf._path))
                    newNsp.resize(newFileName, written)
                    continue
                else:
                    print('not packed!')
            # everything else is copied through uncompressed
            f = newNsp.add(nspf._path, nspf.size)
            nspf.seek(0)
            while not nspf.eof():
                buffer = nspf.read(CHUNK_SZ)
                f.write(buffer)
        newNsp.close()
    except KeyboardInterrupt:
        newNsp.close()
        os.remove(nszPath)
        raise KeyboardInterrupt
    except BaseException as e:
        Print.error(traceback.format_exc())
        newNsp.close()
        os.remove(nszPath)
    return nszPath