def exportKeys(fileName):
    nut.initTitles()
    with open(fileName, 'w') as f:
        f.write('id|key|version\n')
        for tid, title in Titles.items():
            if title and title.rightsId and title.key and title.isActive():
                f.write(str(title.rightsId) + '|' + str(title.key) + '|' + str(title.version) + '\n')
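# Usage sketch (hypothetical file name and example values): writes one
# pipe-delimited row per active title that has both a rights id and a key, e.g.
#   id|key|version
#   01000000000100000000000000000003|0123456789abcdef0123456789abcdef|65536
# exportKeys('titlekeys.txt')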
def organize():
    nut.initTitles()
    nut.initFiles()
    #scan()
    Print.info('organizing')
    for k, f in Nsps.files.items():
        #print('moving ' + f.path)
        #Print.info(str(f.hasValidTicket) + ' = ' + f.path)
        f.move()
    for id, t in Titles.data().items():
        files = t.getFiles()
        if len(files) > 1:
            #Print.info("%d - %s - %s" % (len(files), t.id, t.name))
            latest = t.getLatestFile()
            if not latest:
                continue
            for f in files:
                if f.path != latest.path:
                    f.moveDupe()
    Print.info('removing empty directories')
    Nsps.removeEmptyDir('.', False)
    Nsps.save()
def post(args):
    if not args.import_title_keys:
        return
    nut.initTitles()
    nut.initFiles()
    with open(args.import_title_keys, 'r') as f:
        for line in f.read().split('\n'):
            if '=' not in line:
                continue
            try:
                rightsId, key = line.split('=')
                rightsId = rightsId.strip()
                titleId = rightsId[0:16]
                key = key.strip()
                title = Titles.get(titleId)
                nsp = title.getLatestNsp()
                nsz = title.getLatestNsz()
                print(nsp)
                if not nsp and not nsz:
                    Print.info('title import: new title detected: %s - %s' % (title.id, title.name))
                elif not title.key:
                    Print.info('title import: new title key detected: %s - %s' % (title.id, title.name))
                title.rightsId = rightsId
                title.key = key
            except BaseException:
                raise
    Titles.save()
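# Expected input format for --import-title-keys (inferred from the parsing
# above): one "rightsId=key" pair per line; the first 16 hex characters of the
# rights id are the title id. Values below are illustrative only:
#   01000000000100000000000000000003=0123456789abcdef0123456789abcdef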
def export(file, cols=['id', 'rightsId', 'key', 'isUpdate', 'isDLC', 'isDemo', 'baseName', 'name', 'version', 'region']):
    nut.initTitles()
    Titles.export(file, cols)
def logNcaDeltas(file):
    nut.initTitles()
    nut.initFiles()
    x = open(file, "w", encoding="utf-8-sig")
    for k, f in Nsps.files.items():
        try:
            t = f.title()
            if (t.isDLC or t.isUpdate or Config.download.base) and (
                    not t.isDLC or Config.download.DLC) and (
                    not t.isDemo or Config.download.demo) and (
                    not t.isUpdate or Config.download.update) and (
                    t.key or Config.download.sansTitleKey) and (
                    len(Config.titleWhitelist) == 0 or t.id in Config.titleWhitelist) and t.id not in Config.titleBlacklist:
                f.open(f.path)
                if f.hasDeltas():
                    Print.info(f.path)
                    x.write(f.path + "\r\n")
                f.close()
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            Print.info('error: ' + str(e))
    x.close()
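# Note: the eligibility chain above (base/DLC/demo/update toggles in
# Config.download, key presence unless sansTitleKey, whitelist/blacklist
# membership) is the same filter reused by downloadAll, logMissingTitles, and
# updateVersions further down in this module.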
def scrapeShogunThreaded():
    nut.initTitles()
    nut.initFiles()
    scrapeThreads = []
    numThreads = 8
    q = queue.Queue()
    for region in cdn.regions():
        q.put(region)
    for i in range(numThreads):
        t = threading.Thread(target=scrapeShogunWorker, args=[q])
        t.daemon = True
        t.start()
        scrapeThreads.append(t)
    q.join()
    for i in range(numThreads):
        q.put(None)
    for t in scrapeThreads:
        t.join()
    Titles.saveAll()
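# Shutdown pattern used above (assuming scrapeShogunWorker calls q.task_done()
# for each region and exits its loop when it dequeues None): q.join() blocks
# until all regions are processed, then one None sentinel is queued per worker
# so every thread terminates before join() reaps it.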
def matchDemos():
    nut.initTitles()
    nut.initFiles()
    orphans = {}
    Titles.loadTxtDatabases()
    for nsuId, titleId in Titles.nsuIdMap.items():
        for region, languages in Config.regionLanguages().items():
            for language in languages:
                if nsuId:
                    title = Titles.get(str(nsuId), region, language)
                    title.id = titleId
    for region, languages in Config.regionLanguages().items():
        for language in languages:
            for nsuId, rt in Titles.data(region, language).items():
                if rt.id:
                    continue
                orphans[nsuId] = rt.name
            Titles.saveRegion(region, language)
    for nsuId, name in orphans.items():
        print(str(nsuId) + '|' + str(name))
def scrapeShogun():
    nut.initTitles()
    nut.initFiles()
    for region in cdn.regions():
        cdn.Shogun.scrapeTitles(region)
    Titles.saveAll()
def scanLatestTitleUpdates():
    nut.initTitles()
    nut.initFiles()
    for k, i in CDNSP.get_versionUpdates().items():
        id = str(k).upper()
        version = str(i)
        if not Titles.contains(id):
            if len(id) != 16:
                Print.info('invalid title id: ' + id)
                continue
            t = Title()
            t.setId(id)
            Titles.set(id, t)
            Print.info('Found new title id: ' + str(id))
        t = Titles.get(id)
        if str(t.version) != str(version):
            Print.info('new version detected for %s[%s] v%s' % (t.name or '', t.id or ('0' * 16), str(version)))
            t.setVersion(version, True)
    Titles.save()
def unlockAll(copy=False):
    nut.initTitles()
    nut.initFiles()
    files = []
    for k, f in Nsps.files.items():
        files.append(f)
    for f in files:
        try:
            if f.isUnlockable() and f.title().isActive():
                if f.title().getLatestNsp() is not None or f.title().getLatestNsz() is not None:
                    Print.info('unlocked file already exists, skipping ' + str(f.path))
                    continue
                f.open(getUnlockPath(f.path, copy), 'r+b')
                if not f.verifyKey(f.titleId, f.title().key):
                    raise IOError('Could not verify title key! %s / %s - %s' % (f.titleId, f.title().key, f.title().name))
                Print.info('unlocking ' + str(f.path))
                f.unlock()
                f.close()
        except BaseException as e:
            Print.info('error unlocking: ' + str(e))
            traceback.print_exc(file=sys.stdout)
def run():
    urllib3.disable_warnings()
    print(' ,;:;;,')
    print(' ;;;;;')
    print(' .=\', ;:;;:,')
    print(' /_\', "=. \';:;:;')
    print(' @=:__, \\,;:;:\'')
    print(' _(\\.= ;:;;\'')
    print(' `"_( _/="`')
    print(' `"\'')
    nut.initTitles()
    nut.initFiles()
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon('images/logo.jpg'))
    ex = App()
    threads = []
    threads.append(threading.Thread(target=initThread, args=[ex]))
    threads.append(threading.Thread(target=usbThread, args=[]))
    threads.append(threading.Thread(target=nutThread, args=[]))
    for t in threads:
        t.start()
    sys.exit(app.exec_())
def exportVerifiedKeys(fileName):
    nut.initTitles()
    with open(fileName, 'w') as f:
        f.write('id|key|version\n')
        for tid, key in blockchain.blockchain.export().items():
            title = Titles.get(tid)
            if title and title.rightsId:
                f.write(str(title.rightsId) + '|' + str(key) + '|' + str(title.version) + '\n')
def compressionStats():
    nut.initTitles()
    nut.initFiles()
    results = {}
    i = 0
    total = 0
    for k, t in Titles.items():
        try:
            if not t.isActive(skipKeyCheck=True):
                continue
            latestNsz = t.getLatestNsz()
            if not latestNsz:
                continue
            latestNsp = t.getLatestNsp(latestNsz.version)
            if not latestNsp:
                continue
            nspSize = latestNsp.getFileSize()
            nszSize = latestNsz.getFileSize()
            if nspSize > 0 and nszSize > 0:
                cr = nszSize / nspSize
                if t.isDLC:
                    type = 'DLC'
                elif t.isUpdate:
                    type = 'UPD'
                else:
                    type = 'BASE'
                results[k] = {'id': k, 'name': cleanCsv(t.name), 'publisher': cleanCsv(t.publisher), 'type': type, 'nsp': nspSize, 'nsz': nszSize, 'cr': cr}
                i += 1
                total += cr
        except BaseException as e:
            Print.info(str(e))
    if i == 0:
        Print.info('No data found')
        return
    Print.info('files: %d average compression ratio: %.2f' % (i, total / i))
    path = 'compression.stats.csv'
    with open(path, 'w', encoding='utf8') as f:
        f.write('title id,name,publisher,type,nsp,nsz,cr\n')
        for id, data in results.items():
            f.write('%s,%s,%s,%s,%d,%d,%.2f\n' % (data['id'], data['name'], data['publisher'], data['type'], data['nsp'], data['nsz'], data['cr']))
    Print.info('saved compression stats to %s' % path)
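# Sample compression.stats.csv output (illustrative values only; cr is the
# nsz/nsp size ratio, so 0.50 means the nsz is half the nsp size):
#   title id,name,publisher,type,nsp,nsz,cr
#   0100000000010000,Example Game,Example Publisher,BASE,1073741824,536870912,0.50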
def genTinfoilTitles():
    nut.initTitles()
    nut.initFiles()
    for region, languages in Config.regionLanguages().items():
        for language in languages:
            nut.importRegion(region, language)
            Titles.save('titledb/titles.%s.%s.json' % (region, language), False)
            #Print.info('%s - %s' % (region, language))
    nut.scanLatestTitleUpdates()
    nut.export('titledb/versions.txt', ['id', 'rightsId', 'version'])
# NOTE: this redefinition of genTinfoilTitles shadows the variant above; in a
# single module the later definition takes precedence.
def genTinfoilTitles():
    nut.initTitles(verify=False)
    nut.initFiles(verify=False)
    nut.refreshRegions(False)
    for region, languages in Config.regionLanguages().items():
        for language in languages:
            nut.importRegion(region, language, save=False)
            Titles.save('titledb/titles.%s.%s.json' % (region, language), False)
            #Print.info('%s - %s' % (region, language))
    nut.importRegion()
    exit(0)
# NOTE: this redefinition shadows the unlockAll(copy=False) variant above.
def unlockAll():
    nut.initTitles()
    nut.initFiles()
    for k, f in Nsps.files.items():
        if f.isUnlockable():
            try:
                if not blockchain.verifyKey(f.titleId, f.title().key):
                    raise IOError('Could not verify title key! %s / %s - %s' % (f.titleId, f.title().key, f.title().name))
                Print.info('unlocking ' + f.path)
                f.open(f.path, 'r+b')
                f.unlock()
                f.close()
            except BaseException as e:
                Print.info('error unlocking: ' + str(e))
def startDownloadThreads():
    global downloadThreadsStarted
    global activeDownloads
    if downloadThreadsStarted:
        return
    downloadThreadsStarted = True
    nut.initTitles()
    nut.initFiles()
    threads = []
    for i in range(Config.threads):
        activeDownloads.append(0)
        t = threading.Thread(target=downloadThread, args=[i])
        t.daemon = True
        t.start()
        threads.append(t)
def getName(titleId, version, key=None, path=None):
    nut.initTitles()
    nut.initFiles()
    titleId = titleId.upper()
    nsp = Nsp()
    if path:
        nsp.setPath(os.path.basename(path))
    nsp.titleId = titleId
    nsp.version = version
    nsp.hasValidTicket = True
    if path:
        filename, ext = os.path.splitext(path)
    else:
        ext = '.nsp'
    return os.path.join(Config.paths.nspOut, os.path.basename(nsp.fileName() or ('Untitled [%s][v%d]%s' % (titleId, int(version or 0), ext))))
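# Usage sketch (hypothetical id; assumes nsp.fileName() resolves no
# metadata-based name, so the 'Untitled' fallback applies):
#   getName('0100000000010000', '65536')
#   -> os.path.join(Config.paths.nspOut, 'Untitled [0100000000010000][v65536].nsp')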
def downloadAll(wait=True):
    nut.initTitles()
    nut.initFiles()
    global activeDownloads
    global status
    try:
        for k, t in Titles.items():
            if t.isUpdateAvailable() and (t.isDLC or t.isUpdate or Config.download.base) and (
                    not t.isDLC or Config.download.DLC) and (
                    not t.isDemo or Config.download.demo) and (
                    not t.isUpdate or Config.download.update) and (
                    t.key or Config.download.sansTitleKey) and (
                    len(Config.titleWhitelist) == 0 or t.id in Config.titleWhitelist) and t.id not in Config.titleBlacklist:
                # NB: lastestVersion (sic) is the method name as defined on Title.
                if not t.id or t.id == '0' * 16 or (t.isUpdate and t.lastestVersion() in [None, '0']):
                    #Print.warning('no valid id? ' + str(t.path))
                    continue
                if not t.lastestVersion():
                    Print.info('Could not get version for ' + str(t.name) + ' [' + str(t.id) + ']')
                    continue
                Titles.queue.add(t.id)
        Titles.save()
        status = Status.create(Titles.queue.size(), 'Total Download')
        startDownloadThreads()
        while wait and (not Titles.queue.empty() or sum(activeDownloads) > 0):
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    except BaseException as e:
        Print.error(str(e))
    if status:
        status.close()
def logMissingTitles(file):
    nut.initTitles()
    nut.initFiles()
    f = open(file, "w", encoding="utf-8-sig")
    for k, t in Titles.items():
        if t.isUpdateAvailable() and (t.isDLC or t.isUpdate or Config.download.base) and (
                not t.isDLC or Config.download.DLC) and (
                not t.isDemo or Config.download.demo) and (
                not t.isUpdate or Config.download.update) and (
                t.key or Config.download.sansTitleKey) and (
                len(Config.titleWhitelist) == 0 or t.id in Config.titleWhitelist) and t.id not in Config.titleBlacklist:
            if not t.id or t.id == '0' * 16 or (t.isUpdate and t.lastestVersion() in [None, '0']):
                continue
            f.write((t.id or ('0' * 16)) + '|' + (t.key or ('0' * 32)) + '|' + (t.name or '') + "\r\n")
    f.close()
def updateVersions(force=True):
    nut.initTitles()
    nut.initFiles()
    i = 0
    for k, t in Titles.items():
        if force or t.version is None:
            if (t.isDLC or t.isUpdate or Config.download.base) and (
                    not t.isDLC or Config.download.DLC) and (
                    not t.isDemo or Config.download.demo) and (
                    not t.isUpdate or Config.download.update) and (
                    t.key or Config.download.sansTitleKey) and (
                    len(Config.titleWhitelist) == 0 or t.id in Config.titleWhitelist) and t.id not in Config.titleBlacklist:
                v = t.lastestVersion(True)
                Print.info("%s[%s] v = %s" % (str(t.name), str(t.id), str(v)))
                i = i + 1
                if i % 20 == 0:
                    Titles.save()
    for t in list(Titles.data().values()):
        if not t.isUpdate and not t.isDLC and t.updateId and not Titles.contains(t.updateId):
            u = Title()
            u.setId(t.updateId)
            if u.lastestVersion():
                Titles.set(t.updateId, u)
                Print.info("%s[%s] FOUND" % (str(t.name), str(u.id)))
            i = i + 1
            if i % 20 == 0:
                Titles.save()
    Titles.save()
def refresh(titleRightsOnly=False):
    nut.initTitles()
    nut.initFiles()
    i = 0
    for k, f in Nsps.files.items():
        try:
            if titleRightsOnly:
                title = Titles.get(f.titleId)
                if title and title.rightsId and (title.key or f.path.endswith('.nsx')):
                    continue
            i = i + 1
            print(f.path)
            f.open()
            f.readMeta()
            f.close()
            if i > 20:
                i = 0
                Titles.save()
        except BaseException as e:
            print('exception: ' + str(e))
    Titles.save()
def exportNcaMap(path):
    nut.initTitles()
    nut.initFiles()
    ncaMap = {}
    i = 0
    for id, title in Titles.items():
        print(id)
        try:
            nsp = title.getLatestFile()
            if not nsp:
                continue
            nsp.open(nsp.path, 'r+b')
            ncaMap[id] = {}
            ncaMap[id]['version'] = int(title.version)
            ncaMap[id]['files'] = []
            for f in nsp:
                if isinstance(f, Fs.Nca):
                    ncaMap[id]['files'].append(f._path)
            i += 1
            # checkpoint the map to disk every 100 titles
            if i > 100:
                i = 0
                with open(path, 'w') as outfile:
                    json.dump(ncaMap, outfile, indent=4)
        except BaseException as e:
            Print.error(str(e))
    with open(path, 'w') as outfile:
        json.dump(ncaMap, outfile, indent=4)
def updateDb(url, c=0):
    nut.initTitles()
    c += 1
    if c > 3:
        return False
    Print.info("Downloading new title database " + url)
    try:
        if not url:
            return
        if "http://" not in url and "https://" not in url:
            try:
                url = base64.b64decode(url)
            except Exception as e:
                Print.info("\nError decoding url: ", e)
                return
        r = requests.get(url)
        r.encoding = 'utf-8-sig'
        if r.status_code == 200:
            try:
                # follow an interstitial "Proceed" page if one is returned
                m = re.search(r'<a href="([^"]*)">Proceed</a>', r.text)
                if m:
                    return updateDb(m.group(1), c)
            except BaseException:
                pass
            Titles.loadTitleBuffer(r.text, False)
        else:
            Print.info('Error updating database: ', repr(r))
    except Exception as e:
        Print.info('Error downloading: ' + str(e))
        raise
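# Usage sketch (hypothetical URL): fetches a title database over HTTP(S) and
# feeds it to Titles.loadTitleBuffer. An argument that is not a URL is treated
# as a base64-encoded URL, and redirect "Proceed" pages are followed at most
# three times (the c counter) before giving up.
#   updateDb('https://example.com/titlekeys.txt')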
except BaseException as e:
    Print.error(str(e))
    raise

if args.info:
    f = Fs.factory(args.info)
    f.open(args.info, 'r+b')
    f.printInfo(args.depth + 1)

if args.verify_ncas:
    nut.initTitles()
    nut.initFiles()
    f = Fs.factory(args.verify_ncas)
    f.open(args.verify_ncas, 'r+b')
    if not f.verify():
        Print.error('Archive is INVALID: %s' % args.verify_ncas)
    else:
        Print.info('Archive is VALID: %s' % args.verify_ncas)
    f.close()

if args.scrape_title:
    if not Titles.contains(args.scrape_title):
        Print.error('Could not find title ' + args.scrape_title)
    else:
        Titles.get(args.scrape_title).scrape(False)
        Titles.save()
        pprint.pprint(Titles.get(args.scrape_title).__dict__)