def isUpdateAvailable(self):
    """Return info about a newer version of this file's title, or None.

    The returned dict carries 'id', 'baseId', 'currentVersion' and
    'newVersion' describing the available update.
    """
    title = self.title()

    # Case 1: the title database records a newer version of this exact id.
    if self.titleId and title.version is not None and self.version < title.version and str(title.version) != '0':
        return {
            'id': title.id,
            'baseId': title.baseId,
            'currentVersion': self.version,
            'newVersion': title.version,
        }

    # Case 2: this is a base title whose separate update title is known.
    if not title.isUpdate and not title.isDLC and Titles.contains(title.updateId):
        updateFile = self.getUpdateFile()
        if updateFile:
            # We already hold the update NSP on disk; let it answer.
            return updateFile.isUpdateAvailable()

        updateTitle = Titles.get(title.updateId)
        if updateTitle.version and str(updateTitle.version) != '0':
            return {
                'id': updateTitle.id,
                'baseId': title.baseId,
                'currentVersion': None,
                'newVersion': updateTitle.version,
            }

    return None
def export(file, cols=None):
    """Export the title database to *file*.

    Args:
        file: destination path handed to Titles.export().
        cols: column names to write; defaults to the standard column set.
    """
    # BUGFIX(idiom): the original used a mutable default argument
    # (cols=[...]); build the default list fresh on each call instead.
    if cols is None:
        cols = [
            'id', 'rightsId', 'key', 'isUpdate', 'isDLC', 'isDemo',
            'name', 'baseName', 'version', 'region',
        ]
    initTitles()
    Titles.export(file, cols)
def downloadThread(i):
    # Download worker: pulls title ids off Titles.queue and downloads them
    # until Config.isRunning is cleared. *i* is this thread's slot index in
    # the shared activeDownloads list, which downloadAll() polls to know
    # when all workers are idle.
    Print.info('starting thread ' + str(i))
    global status
    while Config.isRunning:
        try:
            id = Titles.queue.shift()
            if id and Titles.contains(id):
                activeDownloads[i] = 1  # mark this slot busy
                t = Titles.get(id)
                path = CDNSP.download_game(t.id.lower(), t.lastestVersion(), t.key, True, '', True)
                if os.path.isfile(path):
                    # Wrap the downloaded file and move it to its organized path.
                    nsp = Fs.Nsp(path, None)
                    nsp.move()
                    Nsps.files[nsp.path] = nsp
                    Nsps.save()
                status.add()  # advance the global progress counter
                activeDownloads[i] = 0  # mark slot idle again
            else:
                time.sleep(1)  # queue empty; idle briefly
        except KeyboardInterrupt:
            pass
        except BaseException as e:
            Print.error(str(e))
            activeDownloads[i] = 0  # never leave the slot flagged busy on error
    Print.info('ending thread ' + str(i))
def importRegion(region='US', language='en'):
    """Merge regional title databases into the master list, preferring *region*/*language*.

    Import order matters: every region/language pair is merged first, then
    every region carrying the preferred language, then the requested pair
    itself — so later passes overwrite conflicting fields with the
    preferred localization.

    Returns:
        False when the region/language pair is unknown; None on success.
    """
    if region not in Config.regionLanguages() or language not in Config.regionLanguages()[region]:
        Print.error('Could not locate %s/%s !' % (region, language))
        return False

    # Pass 1: merge every known region/language combination.
    for region2 in Config.regionLanguages():
        for language2 in Config.regionLanguages()[region2]:
            _importRegionLanguage(region2, language2)

    # Pass 2: re-apply every region that carries the preferred language.
    for region2 in Config.regionLanguages():
        for language2 in Config.regionLanguages()[region2]:
            if language2 != language:
                continue
            _importRegionLanguage(region2, language2)

    # Pass 3: the exact requested pair wins last.
    _importRegionLanguage(region, language)

    Titles.loadTxtDatabases()
    Titles.save()


def _importRegionLanguage(region, language):
    """Import every title from one regional database into the master list."""
    for nsuId, regionTitle in Titles.data(region, language).items():
        if not regionTitle.id:
            continue
        title = Titles.get(regionTitle.id, None, None)
        title.importFrom(regionTitle, region, language)
def getScreenshotImage(request, response):
    """Serve screenshot *index* of title *id* from the URL bits, or 404/500."""
    if len(request.bits) < 3:
        return Server.Response404(request, response)

    id = request.bits[2]

    try:
        index = int(request.bits[3])
    except:
        return Server.Response404(request, response)

    if not Titles.contains(id):
        return Server.Response404(request, response)

    path = Titles.get(id).screenshotFile(index)
    if not path:
        return Server.Response404(request, response)

    response.setMime(path)
    # Screenshots are immutable; allow clients to cache for a year.
    response.headers['Cache-Control'] = 'max-age=31536000'

    if os.path.isfile(path):
        with open(path, 'rb') as f:
            response.write(f.read())

    # Fallback when the file is missing on disk (presumably a no-op once
    # the response body was already written — TODO confirm in Server).
    return Server.Response500(request, response)
def scrapeShogun():
    """Scrape shogun (eshop) title metadata for every CDN region, then save."""
    initTitles()
    initFiles()

    for currentRegion in cdn.regions():
        cdn.Shogun.scrapeTitles(currentRegion)

    Titles.saveAll()
def updateDb(url):
    """Download a remote title database and merge it into memory.

    Args:
        url: plain http(s) URL, or a base64-encoded URL (decoded first).

    Decode failures and HTTP errors are logged and swallowed; transport
    errors are logged and re-raised.
    """
    initTitles()
    Print.info("Downloading new title database " + url)
    try:
        # BUGFIX(idiom): `url == '' or not url` was redundant — `not url`
        # already covers the empty string.
        if not url:
            return

        # Obfuscated URLs are distributed base64-encoded; decode anything
        # that does not look like a plain http(s) URL.
        if "http://" not in url and "https://" not in url:
            try:
                url = base64.b64decode(url)
            except Exception as e:
                Print.info("\nError decoding url: ", e)
                return

        r = requests.get(url)
        r.encoding = 'utf-8-sig'

        if r.status_code == 200:
            Titles.loadTitleBuffer(r.text, False)
        else:
            Print.info('Error updating database: ', repr(r))
    except Exception as e:
        Print.info('Error downloading:' + str(e))
        raise
def downloadAll(wait=True):
    """Queue every eligible title for download and run the worker threads.

    Args:
        wait: when True, block until the queue drains and all worker
            slots in the shared ``activeDownloads`` list go idle.

    Uses the module globals ``status``/``activeDownloads`` shared with
    downloadThread().
    """
    global activeDownloads
    global status

    try:
        for k, t in Titles.items():
            if not _wantsDownload(t):
                continue
            # Skip titles with no usable id, and updates with no version.
            if not t.id or t.id == '0' * 16 or (t.isUpdate and t.lastestVersion() in [None, '0']):
                #Print.warning('no valid id? ' + str(t.path))
                continue
            if not t.lastestVersion():
                Print.info('Could not get version for ' + str(t.name))
                continue
            Titles.queue.add(t.id)

        Titles.save()
        status = Status.create(Titles.queue.size(), 'Total Download')

        startDownloadThreads()

        while wait and (not Titles.queue.empty() or sum(activeDownloads) > 0):
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    except BaseException as e:
        Print.error(str(e))

    if status:
        status.close()


def _wantsDownload(t):
    """Apply the configured download filters (type, key, white/blacklist) to title *t*."""
    return (
        t.isUpdateAvailable()
        and not t.retailOnly
        and (t.isDLC or t.isUpdate or Config.download.base)
        and (not t.isDLC or Config.download.DLC)
        and (not t.isDemo or Config.download.demo)
        and (not t.isUpdate or Config.download.update)
        and (t.key or Config.download.sansTitleKey)
        and (len(titleWhitelist) == 0 or t.id in titleWhitelist)
        and t.id not in titleBlacklist
    )
def move(self):
    """Move this NSP file to its canonical path (self.fileName()).

    Returns:
        True when the file is (or is recorded as) at its canonical path,
        False when it cannot or should not be moved.
    """
    if not self.path:
        return False

    if not self.fileName():
        # could not compute a destination filename
        return False

    # Already at the destination (case-insensitive comparison)?
    if os.path.abspath(self.fileName()).lower() == os.path.abspath(self.path).lower():
        return False

    # BUGFIX: the original compared with `==` here, which is unreachable
    # after the case-insensitive equality check above always returns first.
    # The intent is clearly to detect a *different* file already occupying
    # the destination — a duplicate title — so compare with `!=`.
    if os.path.isfile(self.fileName()) and os.path.abspath(self.path) != os.path.abspath(self.fileName()):
        print('duplicate title: ')
        print(os.path.abspath(self.path))
        print(os.path.abspath(self.fileName()))
        return False

    try:
        os.makedirs(os.path.dirname(self.fileName()), exist_ok=True)
        newPath = self.fileName()
        os.rename(self.path, newPath)
        self.path = newPath
    except BaseException as e:
        # Best-effort: log and fall through so the title record still points
        # at the canonical name (matches original behavior).
        print('failed to rename file! %s -> %s : %s' % (self.path, self.fileName(), e))

    if self.titleId in Titles.keys():
        Titles.get(self.titleId).path = self.fileName()

    return True
def download(id):
    """Download a title given an 'id[,titleKey[,version]]' spec string.

    Args:
        id: 16-hex-char title id, optionally with a comma-separated title
            key and version appended.

    Returns:
        True on a dispatched download, False on malformed arguments.

    Raises:
        IOError: when the title id is not 16 characters long.
    """
    bits = id.split(',')
    version = None
    key = None

    if len(bits) == 1:
        id = bits[0].upper()
    elif len(bits) == 2:
        id = bits[0].upper()
        key = bits[1].strip()
    elif len(bits) == 3:
        id = bits[0].upper()
        key = bits[1].strip()
        version = bits[2].strip()
    else:
        # BUGFIX: the original concatenated the function object `download`
        # into the message (TypeError at runtime); report the argument.
        Print.info('invalid args: ' + id)
        return False

    if key == '':
        key = None

    if version == '':
        version = None

    if len(id) != 16:
        raise IOError('Invalid title id format')

    if Titles.contains(id):
        # Known title: fall back to the database's version/key when the
        # caller did not supply them.
        title = Titles.get(id)
        CDNSP.download_game(title.id.lower(), version or title.lastestVersion(), key or title.key, True, '', True)
    else:
        CDNSP.download_game(id.lower(), version or Title.getCdnVersion(id.lower()), key, True, '', True)

    return True
def getTitleImage(request, response):
    """Serve the icon (or front box art) of title *id* at *width* px, or 404/500."""
    if len(request.bits) < 3:
        return Server.Response404(request, response)

    id = request.bits[2]

    try:
        width = int(request.bits[3])
    except:
        return Server.Response404(request, response)

    # Reject absurd sizes before touching the filesystem.
    if width < 32 or width > 1024:
        return Server.Response404(request, response)

    if not Titles.contains(id):
        return Server.Response404(request, response)

    title = Titles.get(id)
    path = title.iconFile(width) or title.frontBoxArtFile(width)
    if not path:
        return Server.Response404(request, response)

    response.setMime(path)
    # Artwork is immutable; allow clients to cache for a year.
    response.headers['Cache-Control'] = 'max-age=31536000'

    if os.path.isfile(path):
        with open(path, 'rb') as f:
            response.write(f.read())

    # Fallback when the file is missing on disk (presumably a no-op once
    # the response body was already written — TODO confirm in Server).
    return Server.Response500(request, response)
def genTinfoilTitles():
    """Regenerate the per-region/language tinfoil title database files."""
    initTitles()
    initFiles()

    for regionCode, languages in Config.regionLanguages().items():
        for languageCode in languages:
            importRegion(regionCode, languageCode)
            Titles.save('titledb/titles.%s.%s.json' % (regionCode, languageCode))
def scrapeLangTitles(region='US', language='en', shop_id=4):
    # Page through the shogun (eshop) title index for one region/language,
    # resolving each nsuid to its application title id and scraping DLC,
    # then persist the regional database.
    Print.info('Scraping %s %s' % (region, language))
    pageSize = 50
    offset = 0
    total = 1  # placeholder until the first response reports the real total
    c = 0
    while offset < total:
        url = 'https://bugyo.hac.%s.eshop.nintendo.net/shogun/v1/titles?shop_id=%d&lang=%s&country=%s&sort=new&limit=%d&offset=%d' % (
            Config.cdn.environment, shop_id, language, region, pageSize, offset)
        #print(url)
        # Second argument pattern is the cache-file path for this page.
        j = makeJsonRequest(
            'GET', url, {},
            '%d/%s/%s/titles/index/%d-%d.json' % (shop_id, language, region, pageSize, offset))
        if not j:
            break
        total = int(j['total'])
        try:
            for i in j['contents']:
                title = Titles.getNsuid(i['id'], region, language)
                n = getTitleByNsuid(i['id'], region, language)
                title.parseShogunJson(n)
                try:
                    if n and "applications" in n and len(n["applications"]) > 0:
                        # First application entry carries the real title id.
                        titleId = n["applications"][0]["id"].upper()
                        if titleId:
                            title.setId(titleId)
                            # Resolve add-on content nsuids, then scrape DLC detail.
                            for x in cdn.Superfly.getAddOns(titleId):
                                getNsuIds(x, 'aoc', region, language)
                            scrapeDlc(i['id'], region, language)
                        else:
                            print('Could not get title json!')
                    else:
                        #print('no title id found in json!')
                        pass
                except Exception as e:
                    print(str(e))
                    raise
                    pass  # NOTE(review): unreachable after raise
        except Exception as e:
            print(str(e))
            raise
            break  # NOTE(review): unreachable after raise
        offset = offset + len(j['contents'])
    Titles.saveRegion(region, language)
def get_name(titleId):
    """Return a filesystem-safe ASCII name for *titleId*, truncated to 70 chars.

    Falls back to 'Unknown Title' when the id is unknown or lookup fails.
    """
    titleId = titleId.upper()
    if Titles.contains(titleId):
        try:
            t = Titles.get(titleId)
            # Strip path separators and filename-unsafe characters.
            # BUGFIX: the original pattern contained '???' — mojibake for the
            # trademark symbols used by the sibling get_name() ('™©®'),
            # restored here for consistency.
            return (re.sub(r'[/\\:*?!"|™©®]+', "", unidecode.unidecode(t.name.strip())))[:70]
        except:
            pass
    return 'Unknown Title'
def scan():
    """One-shot scan: load databases, then index NSP files on disk.

    Subsequent calls are no-ops thanks to the hasScanned guard.
    """
    global hasScanned

    if hasScanned:
        return
    hasScanned = True

    initTitles()
    initFiles()
    Nsps.scan(Config.paths.scan)
    Titles.save()
def genTinfoilTitles():
    """Regenerate tinfoil title databases, rescan updates, and export versions.

    NOTE: this redefinition shadows the earlier genTinfoilTitles in this file.
    """
    initTitles()
    initFiles()

    for regionCode, languages in Config.regionLanguages().items():
        for languageCode in languages:
            importRegion(regionCode, languageCode)
            Titles.save('titledb/titles.%s.%s.json' % (regionCode, languageCode))
            #Print.info('%s - %s' % (region, language))

    scanLatestTitleUpdates()
    export('titledb/versions.txt', ['id', 'version'])
def title(self):
    """Return the Title record for this NSP, registering a stub if absent.

    Raises:
        IOError: when no titleId has been set on this NSP.
    """
    if not self.titleId:
        raise IOError('NSP no titleId set')

    # Reuse the existing record when the database already knows this id.
    if self.titleId in Titles.keys():
        return Titles.get(self.titleId)

    # Otherwise create and register a fresh placeholder under our id.
    newTitle = Title.Title()
    newTitle.setId(self.titleId)
    Titles.data()[self.titleId] = newTitle
    return newTitle
def scrapeThread(id, delta=True):
    """Scrape every scrapeThreads-th title, interleaved by thread *id*."""
    size = len(Titles.titles) // scrapeThreads
    st = Status.create(size, 'Thread ' + str(id))

    for index, titleId in enumerate(Titles.titles.keys()):
        try:
            # Thread `id` owns the indices where (index - id) is a
            # multiple of scrapeThreads.
            if (index - id) % scrapeThreads == 0:
                Titles.get(titleId).scrape(delta)
                st.add()
        except BaseException as e:
            Print.error(str(e))

    st.close()
def get_name(titleId):
    """Return a filesystem-safe ASCII name for *titleId*.

    Falls back to 'Unknown Title' when the id is unknown or lookup fails.
    """
    titleId = titleId.upper()
    # BUGFIX(idiom): removed unused local `lines = titlekey_list`.
    if Titles.contains(titleId):
        try:
            t = Titles.get(titleId)
            # Strip path separators, reserved filename chars, and trademark symbols.
            return re.sub(r'[/\\:*?!"|™©®]+', "", unidecode.unidecode(t.name.strip()))
        except:
            pass
    return 'Unknown Title'
def unlock(self):
    # Restore a usable title key into this NSP's ticket from the titles
    # database, flush it to disk, and re-file the NSP under its canonical
    # name. Raises IOError when the database has no key for this title.
    #if not self.isOpen():
    #	self.open('r+b')
    if not Titles.contains(self.titleId):
        raise IOError('No title key found in database!')
    # Write the known-good key (hex string in the db) into the ticket.
    self.ticket().setTitleKeyBlock(int(Titles.get(self.titleId).key, 16))
    Print.info('setting title key to ' + Titles.get(self.titleId).key)
    self.ticket().flush()
    self.close()
    self.hasValidTicket = True
    self.move()  # relocate now that the ticket is valid
def initTitles():
    """Idempotently load the title database, white/blacklists, and NSP index."""
    global isInitTitles

    if isInitTitles:
        return
    isInitTitles = True

    Titles.load()

    loadTitleWhitelist()
    loadTitleBlacklist()

    Nsps.load()
def refresh():
    """Re-read metadata for every indexed NSP, then save the title database.

    Failures propagate to the caller, matching the original behavior
    (its `except: raise` immediately re-raised, leaving a dead `pass`).
    """
    initTitles()
    initFiles()

    for path, nsp in Nsps.files.items():
        nsp.open()
        try:
            nsp.readMeta()
        finally:
            # BUGFIX: close the file even when readMeta() raises, so the
            # handle is not leaked while the exception propagates.
            nsp.close()

    Titles.save()
def getFiles(request, response):
    """Respond with JSON mapping baseId -> {'base': [...], 'dlc': [...], 'update': [...]}."""
    grouped = {}

    for path, nsp in Nsps.files.items():
        if not Titles.contains(nsp.titleId):
            continue

        title = Titles.get(nsp.titleId)
        bucket = grouped.setdefault(title.baseId, {'base': [], 'dlc': [], 'update': []})

        # Sort each file into its category under the base title.
        if title.isDLC:
            bucket['dlc'].append(nsp.dict())
        elif title.isUpdate:
            bucket['update'].append(nsp.dict())
        else:
            bucket['base'].append(nsp.dict())

    response.write(json.dumps(grouped))
def scan():
    """Full rescan: refresh regions, import the configured region, scan disk.

    Returns the result of Nsps.scan(). NOTE: this redefinition shadows the
    earlier scan() in this file and no longer honors the hasScanned guard.
    """
    global hasScanned

    #if hasScanned:
    #	return
    hasScanned = True

    initTitles()
    initFiles()
    refreshRegions()
    importRegion(Config.region, Config.language)

    scanned = Nsps.scan(Config.paths.scan)
    Titles.save()
    return scanned
def getFiles():
    """Return a JSON list describing the latest valid file of every title."""
    # BUGFIX: `o` was never initialized, so every call raised NameError
    # (or silently relied on an unrelated global).
    o = []
    for k, t in Titles.items():
        f = t.getLatestFile()
        if f and f.hasValidTicket:
            o.append({
                'id': t.id,
                'name': t.name,
                'version': int(f.version) if f.version else None,
                'size': f.getFileSize(),
                'mtime': f.getFileModified(),
            })
    return json.dumps(o)
def organize():
    """Move every NSP to its canonical path and shelve duplicate versions."""
    initTitles()
    initFiles()
    #scan()
    Print.info('organizing')

    # Pass 1: relocate each file to its computed destination.
    for key, nsp in Nsps.files.items():
        #print('moving ' + f.path)
        #Print.info(str(f.hasValidTicket) +' = ' + f.path)
        nsp.move()

    # Pass 2: for titles with several files, keep the latest in place and
    # move the others aside as duplicates.
    for titleId, title in Titles.data().items():
        files = title.getFiles()
        if len(files) <= 1:
            continue
        #Print.info("%d - %s - %s" % (len(files), t.id, t.name))

        latest = title.getLatestFile()
        if not latest:
            continue

        for nsp in files:
            if nsp.path != latest.path:
                nsp.moveDupe()

    Print.info('removing empty directories')
    Nsps.removeEmptyDir('.', False)
    Nsps.save()
def getSearch(request, response):
    """Respond with JSON for titles matching the query-string filters.

    Supported filters: region, publisher (membership), dlc/update/demo
    (integer flags). Absent parameters match everything.
    """
    region = request.query.get('region')
    publisher = request.query.get('publisher')

    dlc = request.query.get('dlc')
    if dlc:
        dlc = int(dlc[0])

    update = request.query.get('update')
    if update:
        update = int(update[0])

    demo = request.query.get('demo')
    if demo:
        demo = int(demo[0])

    results = []
    for key, title in Titles.items():
        latest = title.getLatestFile()
        # Only titles with a valid, keyed file are searchable.
        if not latest or not latest.hasValidTicket:
            continue
        if region != None and title.region not in region:
            continue
        if dlc != None and title.isDLC != dlc:
            continue
        if update != None and title.isUpdate != update:
            continue
        if demo != None and title.isDemo != demo:
            continue
        if publisher != None and title.publisher not in publisher:
            continue

        results.append({
            'id': title.id,
            'name': title.name,
            'version': int(latest.version) if latest.version else None,
            'region': title.region,
            'size': latest.getFileSize(),
            'mtime': latest.getFileModified(),
        })

    response.write(json.dumps(results))
def getNsuIds(titleIds, type='title', region='US', language='en', shop_id=4):
    """Resolve shogun nsuIds for *titleIds* and parse their metadata.

    Returns:
        dict mapping titleId (upper-case) -> nsuId. Errors are logged and
        swallowed so one bad entry does not abort the batch.
    """
    j = ids(titleIds, type, region, language, shop_id)
    lst = {}
    try:
        for i in j['id_pairs']:
            titleId = i['title_id'].upper()
            nsuId = int(i['id'])
            lst[titleId] = nsuId

            title = Titles.getNsuid(nsuId, region, language)
            title.setId(titleId)

            try:
                # Updates carry no shogun detail page; parse DLC and base
                # titles only. (Removed a stray dead `try: pass`.)
                if title.isDLC:
                    title.parseShogunJson(getDlcByNsuid(nsuId, region, language), region, language, True)
                elif not title.isUpdate:
                    title.parseShogunJson(getTitleByNsuid(nsuId, region, language), region, language, True)
            except BaseException as e:
                # BUGFIX: the original bare `except:` logged `str(e)` with
                # `e` unbound, raising NameError inside the handler.
                Print.error(str(e))
    except BaseException as e:
        Print.error(str(e))
    return lst
def exportKeys(fileName):
    """Write an 'id|key|version' row for every title with a rights id and key."""
    initTitles()

    with open(fileName, 'w') as out:
        out.write('id|key|version\n')
        for tid, title in Titles.items():
            if title and title.rightsId and title.key:
                out.write(str(title.rightsId) + '|' + str(title.key) + '|' + str(title.version) + '\n')
def exportVerifiedKeys(fileName):
    """Write an 'id|key|version' row for each blockchain-verified title key."""
    initTitles()

    with open(fileName, 'w') as out:
        out.write('id|key|version\n')
        for tid, key in blockchain.blockchain.export().items():
            title = Titles.get(tid)
            if title and title.rightsId:
                out.write(str(title.rightsId) + '|' + str(key) + '|' + str(title.version) + '\n')
def got_words(target):
    """Collect title words from tweets about *target* and return them sorted."""
    collected = []
    for tweet in twt_get(target)['statuses']:
        text = tweet['text']
        #print(text)
        collected = collected + Titles.get_titles(text)
    collected = sorted(collected)
    #print(collected)
    return collected
def got_words(sample):
    """Collect title words from tweets for every term in *sample*, sorted.

    Retries each query until the Twitter API returns results, sleeping
    between attempts to let the rate limit recover. NOTE: this
    redefinition shadows the single-target got_words() in this file.
    """
    words = []
    for s in sample:
        while True:
            twt = twt_get(s)
            if len(twt) != 0:
                break
            wait_m = 5
            # BUGFIX: the message claimed a 15-minute wait but the code
            # sleeps wait_m (5) minutes; report the actual wait time.
            print("#### Waiting %d minutes to recover access limit ####" % wait_m)
            time.sleep(wait_m * 60)
        for t in twt['statuses']:
            #print(t)
            text = t['text']
            #print(text)
            words = words + Titles.get_titles(text)
    words = sorted(words)
    #print(words)
    return words
# Wikipedia link-route experiment: fetch the target article, extract its
# candidate title words, and dump both the raw text and the word list to
# files for inspection.

#start = "有川浩"
start = "読売新聞"  # starting article
#start = "高橋"
#start = "僕は友達が少ない"
target = "井上麻里奈"  # target article

#text = Text.get_wiki(start)[2]  ## [id,title,text]
#words = Titles.get_titles(text)
#print(words)

routes = []
result = ["### ERROR ###"]  # placeholder until a route is found
routes.append([start])

target_text = Text.get_wiki(target)[2]  ## [id, title, text]
target_words = list(set(Titles.get_titles(target_text)))  # de-duplicated

# BUGFIX(idiom): use context managers instead of open/write/close triplets
# so the files are closed even when a write fails.
f_name = target + "(text).txt"
with open(f_name, 'w') as f:  # dump the raw article text
    f.write(target_text)

f_name = target + "(words).txt"
with open(f_name, 'w') as f:  # dump one extracted word list
    f.write("\n".join(target_words))

print(target_text)
print("================================================================\n")
print(target_words)