def get(self, args):
    """Print the current value of the requested config variable.

    Parameters
    ----------
    args : argparse.Namespace
        namespace object as passed from argument parser

    Returns
    -------
    int
        success code
    """
    variable = args.variable
    current = self.config.option[variable]
    console('Config setting "%s" is currently set to:\n%s'
            % (variable, current))
    return 0
def __init__(self, *args, **kwargs):
    """Set up wiki connections and the English lookup caches.

    Validates cheap preconditions first, logs into both the English and
    the target-language wiki, then loads the English relational data and
    builds the word / base-item caches used for translation.

    Raises
    ------
    ValueError
        if target-wiki credentials are missing, or the target language
        is English (copying English -> English makes no sense)
    """
    super().__init__(*args, **kwargs)
    # Set this up at the earliest so no processing time is wasted
    if not self.parsed_args.user or not self.parsed_args.password:
        raise ValueError(
            'User login to target wiki is required for this operation.')
    # Bug fix: this guard used to run only AFTER both wiki logins, so an
    # English target still performed two needless network logins.
    if self.lang == 'English':
        raise ValueError('Can\'t export unique items TO English wiki')

    self.site_english = mwclient.Site(
        WIKIS['English'], path='/', scheme='https')
    self.site_english.login(self.parsed_args.en_user,
                            self.parsed_args.en_password)

    self.site_other = mwclient.Site(
        WIKIS[self.lang], path='/', scheme='https')
    self.site_other.login(self.parsed_args.user, self.parsed_args.password)

    self.rr_english = RelationalReader(
        path_or_ggpk=self.base_path,
        raise_error_on_missing_relation=False,
        read_options={
            'use_dat_value': False,
            'auto_build_index': True,
        },
        language='English')

    console('Creating lookup cache...')
    # Only words from the unique-item wordlist are translation candidates.
    self.words = []
    for row in self.rr_english['Words.dat']:
        if row['WordlistsKey'] == WORDLISTS.UNIQUE_ITEM:
            self.words.append(row)

    # Per-item-class cache of base items, indexed by name for fast lookup.
    self.cache = defaultdict(BaseItemCacheInstance)
    for row in self.rr_english['BaseItemTypes.dat']:
        item_class = row['ItemClassesKey']['Id']
        self.cache[item_class].append(row)
        self.cache[item_class].index['Name'][row['Name']].append(row)
def fuzzy_find_text(self, text, file_name, key, source_list=None,
                    fuzzy_func=fuzz.partial_ratio):
    """Find the translated value of ``text`` in the given .dat file.

    Tries an exact indexed lookup first; falls back to fuzzy matching
    over ``source_list`` and, failing that, prompts the user.

    Parameters
    ----------
    text : str
        English source text to translate
    file_name : str
        name of the .dat file to search
    key : str
        column to match on and to return
    source_list : iterable or None
        rows to fuzzy-search; defaults to the whole English file
    fuzzy_func : callable
        fuzzywuzzy scorer used for comparison

    Returns
    -------
    str or None
        translated text, or None when skipped / unresolved
    """
    text = text.strip()
    if source_list is None:
        source_list = self.rr_english[file_name]

    # Try faster indexed search first and see if we get a unique
    # perfect result
    if key not in self.rr_english[file_name].index:
        self.rr_english[file_name].build_index(key)
    results = self.rr_english[file_name].index[key][text]
    if len(results) == 1:
        return self.rr[file_name][results[0].rowid][key]

    # Try to find translation for the name using fuzzy search
    results = []
    for row in source_list:
        ratio = fuzzy_func(row[key], text)
        if ratio > 90:
            results.append({
                'id': row.rowid,
                'text': row[key],
                'ratio': ratio,
            })

    if len(results) == 0:
        console('No matching text found.')
        manual = input('Enter translated text.\n')
        if manual == '':
            # Bug fix: the message used to interpolate the (now empty)
            # user input instead of the original search text.
            console('No text specified - skipping search for "%s".' % text)
            return
        return manual
    elif len(results) >= 2:
        console('Multiple matching names found.\n')
        for i, row in enumerate(results):
            row['i'] = i
            console('%(i)s: %(ratio)s\n%(text)s\n----------------' % row)
        try:
            correct = results[int(
                input('Enter index of correct translation:\n'))]
        except Exception:
            # Bug fix: previously fell through and raised NameError on
            # the undefined 'correct'; now skip cleanly.
            traceback.print_exc()
            return
        return self.rr[file_name][correct['id']][key]
    else:
        return self.rr[file_name][results[0]['id']][key]
def set(self, args):
    """Assign a new value to the given config variable and persist it.

    Parameters
    ----------
    args : argparse.Namespace
        namespace object as passed from argument parser

    Returns
    -------
    int
        success code (or error code from validation failure)
    """
    try:
        self.config.set_option(args.variable, args.value)
    except ValidateError as e:
        return self._show_error(e)

    self.config.write()
    console('Config setting "%s" has been set to:\n%s'
            % (args.variable, args.value))

    # Warn when the variable requires a setup step that has not run yet.
    needs = self.config.needs_setup(args.variable)
    if needs and not self.config.is_setup(args.variable):
        console('', raw=True)
        console('Variable needs setup. Please run:\nsetup perform')
    return 0
def _read_dat_files(self, args, prefix=''):
    """Read the requested .dat files out of the content GGPK.

    Missing files are warned about and removed from ``args.files``.

    Returns
    -------
    dict
        mapping of file name -> parsed dat.DatFile
    """
    path = get_content_ggpk_path()
    console(prefix + 'Reading "%s"...' % path)
    ggpk = get_content_ggpk(path)

    console(prefix + 'Reading .dat files')
    lang = args.language or config.get_option('language')
    ggpk_data = ggpk['Data']
    if lang != 'English':
        # Non-English data lives in a language subdirectory.
        ggpk_data = ggpk_data[lang]

    dat_files = {}
    missing = []
    for name in tqdm(args.files):
        try:
            node = ggpk_data[name]
        except FileNotFoundError:
            console('Skipping "%s" (missing)' % name, msg=Msg.warning)
            missing.append(name)
            continue
        df = dat.DatFile(name)
        df.read(file_path_or_raw=node.record.extract(), use_dat_value=False)
        dat_files[name] = df

    for file_name in missing:
        args.files.remove(file_name)
    return dat_files
def export(self, parsed_args, monsters):
    """Build wiki export results for the given monster rows.

    Returns
    -------
    ExporterResult
        one result entry per monster
    """
    r = ExporterResult()
    if not monsters:
        console(
            'No monsters found for the specified parameters. Quitting.',
            msg=Msg.warning,
        )
        return r

    console('Found %s monsters, parsing...' % len(monsters))
    console('Accessing additional data...')

    for monster in monsters:
        data = OrderedDict()
        # Copy selected columns into the template dict, applying the
        # per-key condition/format hooks.
        for row_key, copy_data in self._COPY_KEYS.items():
            value = monster[row_key]
            condition = copy_data.get('condition')
            if condition is not None and not condition(monster):
                continue
            fmt = copy_data.get('format')
            if fmt:
                value = fmt(value)
            if value:
                data[copy_data['template']] = value

        cond = MonsterWikiCondition(data=data, cmdargs=parsed_args)
        out_name = 'monster_%s.txt' % data['metadata_id'].replace('/', '_')
        wiki_title = 'Monster:' + self._format_wiki_title(
            data['metadata_id'])
        r.add_result(
            text=cond,
            out_file=out_name,
            wiki_page=[
                {'page': wiki_title, 'condition': cond},
            ],
            wiki_message='Monster updater',
        )
    return r
def get_option(self, key, safe=True):
    """Return the validated value of a config variable.

    With ``safe`` enabled, raises if the key has a pending setup step.

    .. warning::
        if the specified key is missing this method will shutdown
        the CLI with an error message to configure the key

    Parameters
    ----------
    key : str
        key to retrieve the value for
    safe : bool
        whether to check setup is needed

    Returns
    -------
    object
        handled value

    Raises
    ------
    SetupError
        if the setup for the key was not performed
    """
    if safe and key in self.setup and not self.setup[key]['performed']:
        raise SetupError('Setup not performed.')

    try:
        value = self.option[key]
    except KeyError:
        for line in (
                'Config variable "%s" is not configured. Consider running:'
                % key,
                'config set "%s" "<value>"' % key,
                'Exiting...'):
            console(line, msg=Msg.error)
        sys.exit(-1)

    return self.validator.check(self.optionspec[key], value)
def handle(self, *a, mwclient, result, cmdargs, parser):
    """Log into the configured wiki and process all result rows.

    Rows are handled either sequentially (with a sleep between edits)
    or via a thread pool, depending on ``cmdargs.wiki_threads``.
    """
    url = WIKIS.get(config.get_option('language'))
    if url is None:
        console(
            'There is no wiki defined for language "%s"' % cmdargs.language,
            msg=Msg.error
        )
        return

    self.site = mwclient.Site(url, path='/', scheme='https')
    # Credentials fall back to interactive prompts when not supplied.
    username = cmdargs.user or input('Enter your gamepedia user name:\n')
    password = cmdargs.password or input(
        'Please enter your password for the specified user\n'
        'WARNING: Password will be visible in console\n'
    )
    self.site.login(username=username, password=password)

    self.mwclient = mwclient
    self.cmdargs = cmdargs
    self.parser = parser

    if cmdargs.wiki_threads > 1:
        console('Starting thread pool...')
        tp = ThreadPoolExecutor(max_workers=cmdargs.wiki_threads)
        for row in result:
            tp.submit(self._error_catcher, row=row)
        tp.shutdown(wait=True)
    else:
        console('Editing pages...')
        for row in result:
            self._error_catcher(row=row)
            # Throttle sequential edits to be polite to the wiki.
            time.sleep(cmdargs.wiki_sleep)
def _read_dat_files(self, args, prefix=''):
    """Read the requested .dat files from the game file system.

    Files that are missing or fail to parse are reported, removed from
    ``args.files`` and excluded from the returned mapping.

    Parameters
    ----------
    args
        parsed argument namespace with ``files`` and ``language``
    prefix : str
        string prefixed to console status messages

    Returns
    -------
    dict
        mapping of file name -> parsed dat.DatFile
    """
    path = get_content_path()
    console(prefix + 'Loading file system...')
    file_system = FileSystem(root_path=path)

    console(prefix + 'Reading .dat files')
    dat_files = {}
    lang = args.language or config.get_option('language')
    dir_path = "Data/"
    if lang != 'English':
        dir_path = "Data/%s/" % lang
    remove = []
    for name in tqdm(args.files):
        file_path = dir_path + name
        try:
            data = file_system.get_file(file_path)
        except FileNotFoundError:
            console('Skipping "%s" (missing)' % file_path, msg=Msg.warning)
            remove.append(name)
            continue

        df = dat.DatFile(name)
        try:
            df.read(file_path_or_raw=data, use_dat_value=False)
        except Exception:
            print(name, traceback.format_exc())
            remove.append(name)
            # Bug fix: the broken DatFile used to be stored in dat_files
            # anyway, even though the file was scheduled for removal.
            continue

        dat_files[name] = df

    for file_name in remove:
        args.files.remove(file_name)

    return dat_files
def graph(self, parsed_args, **kwargs):
    """Render the warbands graph with graphviz.

    Parameters
    ----------
    parsed_args
        namespace with ``type`` ('map' or 'normal'), ``format`` and
        ``print``
    kwargs
        must contain ``out_dir``

    Returns
    -------
    int
        success code

    Raises
    ------
    ValueError
        if ``parsed_args.type`` is not a known graph type
    """
    if parsed_args.type == 'map':
        dat_file = self.rr['WarbandsMapGraph.dat']
        out_file = 'warbands_map_graph.cv'
    elif parsed_args.type == 'normal':
        dat_file = self.rr['WarbandsGraph.dat']
        out_file = 'warbands_graph.cv'
    else:
        # Bug fix: an unknown type used to fall through and raise an
        # opaque NameError on dat_file below.
        raise ValueError('Unknown graph type: %s' % parsed_args.type)

    console('Creating Graph...')
    dot = Digraph(comment='Warbands Graph', engine='dot',
                  format=parsed_args.format)
    for row in dat_file:
        world_area = row['WorldAreasKey']
        dot.node(str(row.rowid), world_area['Name'])
        for node in row['Connections']:
            dot.edge(str(row.rowid), str(node))

    out_path = os.path.join(kwargs['out_dir'], out_file)
    console('Writing graph to "%s"...' % out_path)
    dot.render(out_path, view=parsed_args.print)
    console('Done.')
    return 0
def copy(self, pn):
    """Copy the unique item page ``pn`` from the English wiki to the
    target-language wiki, translating name, flavour text and base item.

    Parameters
    ----------
    pn : str
        title of the page on the English wiki

    Raises
    ------
    Exception
        if the page does not exist or contains no item template
    """
    console('Processing %s' % pn)
    page = self.site_english.pages[pn]
    if not page.exists:
        raise Exception('Page %s not found' % pn)

    mwtext = mwparserfromhell.parse(page.text())
    for mwtemplate in mwtext.filter_templates():
        if mwtemplate.name.strip().lower() == 'item':
            break
    else:
        raise Exception('Item template not found')

    console('Finding flavour text...')
    if not mwtemplate.has('flavour_text_id') and mwtemplate.has(
            'flavour_text'):
        console(
            'Missing flavour_text_id. Trying to find flavour text in FlavourText.dat'
        )
        ftext = self.fuzzy_find_text(
            mwtemplate.get('flavour_text'),
            'FlavourText.dat',
            'Text',
            fuzzy_func=fuzz.partial_token_set_ratio)

        results = []
        for row in self.rr_english['FlavourText.dat']:
            ratio = fuzz.partial_token_set_ratio(row['Text'], ftext)
            if ratio > 90:
                results.append({
                    'id': row['Id'],
                    'text': row['Text'],
                    'ratio': ratio,
                })

        if len(results) == 0:
            console('No matching flavour text found.')
            text = input(
                'Enter translated flavour text. Type None to skip item entirely.\n'
            )
            if text == 'None':
                console('Skipping item %s.' % pn)
                return
            mwtemplate.get('flavour_text').value = text
        elif len(results) >= 2:
            console('Multiple matching flavour text entries found.\n')
            for i, row in enumerate(results):
                row['i'] = i
                console(
                    '%(i)s %(id)s: %(ratio)s\n%(text)s\n----------------'
                    % row)
            try:
                correct = results[int(
                    input('Enter index of correct translation.'))]
            except Exception:
                # Bug fix: an invalid index used to fall through and
                # raise NameError on the undefined 'correct'.
                traceback.print_exc()
                return
            mwtemplate.get('flavour_text_id').value = correct['id']
        else:
            mwtemplate.get('flavour_text_id').value = results[0]['id']

    # Grab flavour text from other language
    if mwtemplate.has('flavour_text_id'):
        mwtemplate.get('flavour_text').value = ' %s\n' % self.rr[
            'FlavourText.dat'].index['Id'][mwtemplate.get(
                'flavour_text_id').value.strip()]['Text'].replace(
                    '\r', '').replace('\n', '<br>')

    # Need this for multiple things
    name = mwtemplate.get('name').value.strip()

    # Add inventory icon so it shows up correctly
    if not mwtemplate.has('inventory_icon'):
        mwtemplate.add('{0: <40}'.format('inventory_icon'), name)

    # Find translated item name
    console('Finding item name...')
    new = self.fuzzy_find_text(name, 'Words.dat', 'Text2',
                               source_list=self.words)
    if new is None:
        console('Didn\'t get an english name for this item, skipping.')
        return
    mwtemplate.get('name').value = ' %s\n' % new

    # Find the correct name of the base item
    console('Finding base item...')
    if mwtemplate.has('base_item_id'):
        # TODO
        pass
    elif mwtemplate.has('base_item_page'):
        # TODO
        pass
    elif mwtemplate.has('base_item'):
        base = self.fuzzy_find_text(
            mwtemplate.get('base_item'),
            'BaseItemTypes.dat',
            'Name',
            source_list=self.cache[mwtemplate.get('class_id').value.strip()])
        if base is None:
            console('Base item is required for unique items. Skipping.')
            return
        mwtemplate.get('base_item').value = ' %s\n' % base

    if self.parsed_args.copy_upgraded_from:
        # TODO
        pass
    else:
        # Need to copy the list or it won't be deleted properly as it
        # deletes from itself during iteration
        for mwparam in list(mwtemplate.params):
            pname = mwparam.name.strip()
            if pname.startswith('upgraded_from'):
                mwtemplate.remove(mwparam.name)
            # Bug fix: "pname in ('class')" tested substring membership
            # in the string 'class' (also matching 'c', 'la', ...); a
            # one-element tuple is intended.
            elif pname in ('class',):
                mwtemplate.remove(mwparam.name)

    if mwtemplate.has('drop_text'):
        console('Drop text might need a translation. Current text:\n\n%s'
                % mwtemplate.get('drop_text').value.strip())
        text = input('\nNew text (leave empty to copy old):\n')
        if text:
            mwtemplate.get('drop_text').value = ' %s\n' % text

    console('Saving on other wiki...')
    if pn == name:
        page = self.site_other.pages[new]
    else:
        console(
            'Name of page doesn\'t equal item name. \nOld: %s\nItem:%s'
            % (pn, new))
        # Let the user pick a disambiguation suffix interactively.
        cont = True
        while cont:
            t = '%s (%s)' % (new, input('Enter phrase for parenthesis:\n'))
            console('Is this correct?:\n%s' % t)
            cont = input('y/n?\n') != 'y'
        page = self.site_other.pages[t]

    page.save('%s\n\n[[en:%s]]' % (str(mwtemplate), pn))
    console('Done.')
def handle(self, args):
    """Dump the requested .dat files to ``args.target`` as JSON.

    Builds one object per file containing the (virtual) header spec and
    the raw table data, in either list or object format depending on
    ``args.use_object_format``.

    Parameters
    ----------
    args : argparse.Namespace
        namespace object as passed from argument parser
    """
    super().handle(args)
    dict_spec = args.spec.as_dict()
    # ascii vs utf-8 output is mirrored below in dump(ensure_ascii=...).
    with open(args.target, mode='w',
              encoding='ascii' if args.ascii else 'utf-8') as f:
        dat_files = self._read_dat_files(args)
        console('Building data object...')
        out = []
        for file_name in args.files:
            dat_file = dat_files[file_name]
            # One header entry per declared field, annotated with its
            # column index ('rowid') and the spec's properties.
            header = [
                dict({
                    'name': name,
                    'rowid': index
                }, **props) for index, (name, props) in enumerate(
                    dict_spec[file_name]['fields'].items())
            ]
            virtual_header = [
                dict({
                    'name': name,
                    'rowid': index
                }, **props) for index, (name, props) in enumerate(
                    dict_spec[file_name]['virtual_fields'].items())
            ]
            if args.use_object_format:
                # Object format: headers keyed by name, rows as dicts
                # keyed by column id.
                out_obj = {
                    'filename': file_name,
                    'header': {row['name']: row for row in header},
                    'data': [{
                        cid: row[i]
                        for i, cid in enumerate(
                            dat_file.reader.columns_data)
                    } for row in dat_file.reader.table_data],
                }
                virtual_header = ({
                    row['name']: row
                    for row in virtual_header
                })
            else:
                # List format: headers and rows as positional lists.
                out_obj = {
                    'filename': file_name,
                    'header': header,
                    'data': dat_file.reader.table_data,
                }
            if args.include_virtual_fields:
                out_obj['virtual_header'] = virtual_header
            if args.include_record_length:
                out_obj['record_length'] = dat_files[
                    file_name].reader.table_record_length
            out.append(out_obj)
        console('Dumping data to "%s"...' % args.target)
        dump(out, f, ensure_ascii=args.ascii, indent=4)
    console('Done.')
def print_all(self, args):
    """Print every registered config variable, grouped by state.

    Shows configured values, then variables missing from the stored
    config, then stored variables not present in the spec.

    Parameters
    ----------
    args : argparse.Namespace
        namespace object as passed from argument parser

    Returns
    -------
    int
        success code
    """
    spec = set(self.config.optionspec.keys())
    real = set(self.config.option.keys())

    missing = spec - real
    extra = real - spec
    configured = spec - missing

    console('Current stored config variables:')
    for key in sorted(configured):
        console("%s: %s" % (key, self.config.option[key]))

    if missing:
        console('', raw=True)
        console('Missing config variables (require config set):',
                msg=Msg.error)
        for key in sorted(missing):
            console("%s" % (key, ), Msg.error)

    if extra:
        console('', raw=True)
        console('Extra variables (unused):', msg=Msg.warning)
        for key in sorted(extra):
            console("%s: %s" % (key, self.config.option[key]),
                    msg=Msg.warning)
    return 0
def export(self, parsed_args, incursion_rooms):
    """Build wiki export results for the given incursion room rows.

    Optionally extracts and crops the room UI icons to image files when
    ``parsed_args.store_images`` is set.

    Returns
    -------
    ExporterResult
        one result entry per (non-template, named) incursion room
    """
    r = ExporterResult()
    if not incursion_rooms:
        console(
            'No incursion rooms found for the specified parameters. '
            'Quitting.',
            msg=Msg.warning,
        )
        return r
    console('Found %s rooms...' % len(incursion_rooms))
    console(
        'Additional files may be loaded. Processing information - this '
        'may take a while...')
    self._image_init(parsed_args)
    idl_sources = set()
    if parsed_args.store_images:
        # UIImages1.txt maps UI icon ids to source textures + coords.
        idl = IDLFile()
        idl.read(file_path_or_raw=self.file_system.get_file(
            'Art/UIImages1.txt'))
        idl_lookup = idl.as_dict()
    console('Parsing data into templates...')
    for incursion_room in incursion_rooms:
        if 'TEMPLATE' in incursion_room['Id']:
            console('Skipping template room "%s"' % incursion_room['Id'],
                    msg=Msg.warning)
            continue
        elif not incursion_room['Name']:
            console('Skipping incursion room "%s" without a name' %
                    incursion_room['Id'],
                    msg=Msg.warning)
            continue
        data = OrderedDict()
        for row_key, copy_data in self._COPY_KEYS.items():
            value = incursion_room[row_key]
            condition = copy_data.get('condition')
            if condition is not None and not condition(incursion_room):
                continue
            # Skip default values to reduce size of template
            if value == copy_data.get('default'):
                continue
            fmt = copy_data.get('format')
            if fmt:
                value = fmt(value)
            data[copy_data['template']] = value
        if incursion_room['IncursionArchitectKey']:
            mv = incursion_room['IncursionArchitectKey'][
                'MonsterVarietiesKey']
            data['architect_metadata_id'] = mv['Id']
            data['architect_name'] = mv['Name']
        cond = IncursionRoomWikiCondition(
            data=data,
            cmdargs=parsed_args,
        )
        if parsed_args.store_images and incursion_room['UIIcon']:
            idl_record = idl_lookup[incursion_room['UIIcon']]
            src = os.path.join(self._img_path,
                               os.path.split(idl_record.source)[-1])
            # Extract each source texture only once per run.
            if src not in idl_sources:
                console('Writing source file "%s" to images' % src)
                with open(src, 'wb') as f:
                    img_data = self.file_system.extract_dds(
                        self.file_system.get_file(idl_record.source))
                    f.write(img_data[:84])
                    # Rewrite the FourCC: DXT4 is widely unsupported,
                    # DXT5 differs only in premultiplied alpha.
                    if img_data[84:88].decode('ascii') == 'DXT4':
                        f.write('DXT5'.encode('ascii'))
                    else:
                        f.write(img_data[84:88])
                    f.write(img_data[88:])
                idl_sources.add(src)
            # Crop this room's icon out of the shared source texture.
            os.system(
                'magick "%(src)s" -crop %(w)sx%(h)s+%(x)s+%(y)s '
                '"%(dst)s incursion room icon.png"' % {
                    'src': src,
                    'dst': os.path.join(self._img_path, data['icon']),
                    'h': idl_record.h,
                    'w': idl_record.w,
                    'x': idl_record.x1,
                    'y': idl_record.y1,
                })
        r.add_result(
            text=cond,
            out_file='incursion_room_%s.txt' % data['name'],
            wiki_page=[{
                'page': data['name'],
                'condition': cond,
            }, {
                'page': data['name'] + ' (%s)' % (
                    self._incursion_room_page_name[
                        config.get_option('language')]),
                'condition': cond,
            }],
            wiki_message='Incursion room updater',
        )
    if idl_sources:
        console('Cleaning up image files that are no longer necessary')
        for src in idl_sources:
            # NOTE(review): src already includes self._img_path, so this
            # second join relies on os.path.join discarding its first
            # argument for absolute paths — verify when _img_path is a
            # relative path.
            os.remove(os.path.join(self._img_path, src))
    return r
def _show_error(self, e):
    """Print an exception as a console error and return failure code -1."""
    message = "%s: %s" % (e.__class__.__name__, ''.join(e.args))
    console(message, msg=Msg.error)
    return -1
def print_sep(self, char='-'):
    """Print a 70-character separator line built from ``char``."""
    width = 70
    console(char * width)
def _export(self, parsed_args, mods):
    """Build wiki export results for the given modifier rows.

    Collects id/group/domain data, stats with value ranges, spawn and
    generation weights, tags and sell prices into one template per mod.

    Returns
    -------
    ExporterResult
        one result entry per mod
    """
    r = ExporterResult()
    if mods:
        console('Found %s mods. Processing...' % len(mods))
    else:
        console('No mods found for the specified parameters. Quitting.',
                msg=Msg.warning)
        return r
    # Needed for localizing sell prices
    self.rr['BaseItemTypes.dat'].build_index('Id')
    for mod in mods:
        data = OrderedDict()
        # Straight column -> template-key copies (only truthy values).
        for k in (
            ('Id', 'id'),
            ('CorrectGroup', 'mod_group'),
            ('Domain', 'domain'),
            ('GenerationType', 'generation_type'),
            ('Level', 'required_level'),
        ):
            v = mod[k[0]]
            if v:
                data[k[1]] = v
        if mod['Name']:
            root = text.parse_description_tags(mod['Name'])

            # Keep only the 'MS' conditional branch of the name tags.
            def handler(hstr, parameter):
                return hstr if parameter == 'MS' else ''

            data['name'] = root.handle_tags({
                'if': handler,
                'elif': handler
            })
        if mod['BuffDefinitionsKey']:
            data['granted_buff_id'] = mod['BuffDefinitionsKey']['Id']
            data['granted_buff_value'] = mod['BuffValue']
        # todo ID for GEPL
        if mod['GrantedEffectsPerLevelKeys']:
            data['granted_skill'] = ', '.join([
                k['GrantedEffectsKey']['Id']
                for k in mod['GrantedEffectsPerLevelKeys']
            ])
        data['mod_type'] = mod['ModTypeKey']['Name']
        # Gather stat ids with their (min, max) ranges, skipping unset
        # slots and all-zero ranges.
        stats = []
        values = []
        for i in MOD_STATS_RANGE:
            k = mod['StatsKey%s' % i]
            if k is None:
                continue
            stat = k['Id']
            value = mod['Stat%sMin' % i], mod['Stat%sMax' % i]
            if value[0] == 0 and value[1] == 0:
                continue
            stats.append(stat)
            values.append(value)
        data['stat_text'] = '<br>'.join(self._get_stats(
            stats, values, mod))
        for i, (sid, (vmin, vmax)) in enumerate(zip(stats, values),
                                                start=1):
            data['stat%s_id' % i] = sid
            data['stat%s_min' % i] = vmin
            data['stat%s_max' % i] = vmax
        for i, tag in enumerate(mod['SpawnWeight_TagsKeys']):
            j = i + 1
            data['spawn_weight%s_tag' % j] = tag['Id']
            data['spawn_weight%s_value' % j] = mod['SpawnWeight_Values'][i]
        for i, tag in enumerate(mod['GenerationWeight_TagsKeys']):
            j = i + 1
            data['generation_weight%s_tag' % j] = tag['Id']
            data['generation_weight%s_value' % j] = \
                mod['GenerationWeight_Values'][i]
        tags = ', '.join([t['Id'] for t in mod['ModTypeKey']['TagsKeys']]
                         + [t['Id'] for t in mod['TagsKeys']])
        if tags:
            data['tags'] = tags
        if mod['ModTypeKey']:
            # NOTE(review): sell_price is never populated — the inner
            # loop writes data[...] directly, so the sorted() pass below
            # iterates an empty dict and is effectively dead code.
            # Looks like the intent was to accumulate amounts per item
            # first (per the ordering comment) — confirm before fixing.
            sell_price = defaultdict(int)
            for msp in mod['ModTypeKey']['ModSellPriceTypesKeys']:
                for i, (item_id, amount) in enumerate(
                        MOD_SELL_PRICES[msp['Id']].items(), start=1):
                    data['sell_price%s_name' % i] = self.rr[
                        'BaseItemTypes.dat'].index['Id'][item_id]['Name']
                    data['sell_price%s_amount' % i] = amount
            # Make sure this is always the same order
            sell_price = sorted(sell_price.items(), key=lambda x: x[0])
            for i, (item_name, amount) in enumerate(sell_price, start=1):
                data['sell_price%s_name' % i] = item_name
                data['sell_price%s_amount' % i] = amount
        # 3+ tildes not allowed
        page_name = 'Modifier:' + self._format_wiki_title(mod['Id'])
        cond = ModWikiCondition(data, parsed_args)
        r.add_result(
            text=cond,
            out_file='mod_%s.txt' % data['id'],
            wiki_page=[
                {
                    'page': page_name,
                    'condition': cond
                },
            ],
            wiki_message='Mod updater',
        )
    return r
def run(self, parsed_args, **kwargs):
    """Copy every wiki page named on the command line."""
    console('Parsing...')
    for page_name in parsed_args.page:
        self.copy(page_name)
def export(self, parsed_args, areas):
    """Build wiki export results for the given world area rows.

    Copies template data, spawn weights and flavour text, and — unless
    ``skip_main_page`` is set — derives the wiki main page name for
    map-type areas.

    Returns
    -------
    ExporterResult
        one result entry per area
    """
    # NOTE(review): this is printed before the empty-input guard, so it
    # reports "Found 0 areas" right before quitting — confirm intent.
    console('Found %s areas, parsing...' % len(areas))
    r = ExporterResult()
    if not areas:
        console(
            'No areas found for the specified parameters. Quitting.',
            msg=Msg.warning,
        )
        return r
    console('Accessing additional data...')
    self.rr['MapPins.dat'].build_index('WorldAreasKeys')
    self.rr['AtlasNode.dat'].build_index('WorldAreasKey')
    self.rr['MapSeries.dat'].build_index('Id')
    if not parsed_args.skip_main_page:
        self.rr['Maps.dat'].build_index('Regular_WorldAreasKey')
        self.rr['UniqueMaps.dat'].build_index('WorldAreasKey')
    console('Found %s areas. Processing...' % len(areas))
    # Language-specific strings (e.g. Harbinger tier suffixes).
    lang = self._LANG[config.get_option('language')]
    for area in areas:
        data = OrderedDict()
        for row_key, copy_data in self._COPY_KEYS.items():
            value = area[row_key]
            condition = copy_data.get('condition')
            if condition is not None and not condition(area):
                continue
            # Skip default values to reduce size of template
            if value == copy_data.get('default'):
                continue
            '''default = copy_data.get('default')
            if default is not None and value == default:
                continue'''
            fmt = copy_data.get('format')
            if fmt:
                value = fmt(value)
            data[copy_data['template']] = value
        for i, (tag, value) in enumerate(zip(area['SpawnWeight_TagsKeys'],
                                             area['SpawnWeight_Values']),
                                         start=1):
            data['spawn_weight%s_tag' % i] = tag['Id']
            data['spawn_weight%s_value' % i] = value
        # Flavour text: map pins first, atlas nodes take precedence.
        map_pin = self.rr['MapPins.dat'].index['WorldAreasKeys'].get(area)
        if map_pin:
            data['flavour_text'] = map_pin[0]['FlavourText']
        atlas_node = self.rr['AtlasNode.dat'].index['WorldAreasKey'].get(
            area)
        if atlas_node:
            data['flavour_text'] = atlas_node[0]['FlavourTextKey']['Text']
        #
        # Add main-page if possible
        #
        if not parsed_args.skip_main_page:
            # NOTE(review): 'map' shadows the builtin here.
            map = self.rr['Maps.dat'].index['Regular_WorldAreasKey'].get(
                area)
            if map:
                map = map[0]
                if map['MapSeriesKey']['Id'] == 'MapWorlds':
                    data['main_page'] = map['BaseItemTypesKey']['Name']
                else:
                    data['main_page'] = '%s (%s)' % (
                        map['BaseItemTypesKey']['Name'],
                        map['MapSeriesKey']['Name'])
            elif data.get('tags') and 'map' in data['tags']:
                # Derive the map series from the area id prefix; the
                # current 'MapWorlds' series gets no suffix.
                map_version = None
                for row in self.rr['MapSeries.dat']:
                    if not area['Id'].startswith(row['Id']):
                        continue
                    map_version = row['Name']
                if map_version:
                    if map_version == self.rr['MapSeries.dat'].index['Id'][
                            'MapWorlds']['Name']:
                        map_version = None
                if 'Unique' in area['Id'] or 'BreachBoss' in area['Id']\
                        or area['Id'].endswith('ShapersRealm'):
                    if map_version is None:
                        data['main_page'] = area['Name']
                    else:
                        data['main_page'] = '%s (%s)' % (area['Name'],
                                                         map_version)
                elif 'Harbinger' in area['Id']:
                    # Tier suffix (e.g. Mid/High) embedded in the id.
                    tier = re.sub('^.*Harbinger', '', area['Id'])
                    if tier:
                        if map_version is None:
                            data['main_page'] = '%s (%s)' % (
                                area['Name'],
                                lang[tier],
                            )
                        else:
                            data['main_page'] = '%s (%s) (%s)' % (
                                area['Name'],
                                lang[tier],
                                map_version,
                            )
                else:
                    if map_version is None:
                        data['main_page'] = area['Name']
                    else:
                        data['main_page'] = '%s (%s)' % (
                            area['Name'],
                            map_version,
                        )
        cond = WikiCondition(
            data=data,
            cmdargs=parsed_args,
        )
        r.add_result(
            text=cond,
            out_file='area_%s.txt' % data['id'],
            wiki_page=[
                {
                    'page': 'Area:' + self._format_wiki_title(data['id']),
                    'condition': cond,
                },
            ],
            wiki_message='Area updater',
        )
    return r
def copy(self, parsed_args, pn):
    """Copy the unique item page ``pn`` from the English wiki to the
    target-language wiki, translating name, flavour text and base item.

    Parameters
    ----------
    parsed_args
        parsed argument namespace (uses ``ignore_drop_text``)
    pn : str
        title of the page on the English wiki

    Raises
    ------
    Exception
        if the page does not exist or contains no item template
    """
    console('Processing %s' % pn)
    page = self.site_english.pages[pn]
    if not page.exists:
        raise Exception('Page %s not found' % pn)

    mwtext = mwparserfromhell.parse(page.text())
    for mwtemplate in mwtext.filter_templates():
        if mwtemplate.name.strip().lower() == 'item':
            break
    else:
        raise Exception('Item template not found')

    console('Finding flavour text...')
    ftext = None
    if not mwtemplate.has('flavour_text_id') and mwtemplate.has(
            'flavour_text'):
        console(
            'Missing flavour_text_id. Trying to find flavour text in FlavourText.dat'
        )
        ftext = self.fuzzy_find_text(mwtemplate.get('flavour_text'),
                                     'FlavourText.dat',
                                     'Text',
                                     fuzzy_func=fuzz.partial_ratio)
        mwtemplate.get('flavour_text').value = ftext
    # Grab flavour text from other language
    elif mwtemplate.has('flavour_text_id'):
        ftext = self.rr['FlavourText.dat'].index['Id'][mwtemplate.get(
            'flavour_text_id').value.strip()]['Text']

    if ftext:
        mwtemplate.get('flavour_text').value = ' %s\n' % ftext.replace(
            '\r', '').replace('\n', '<br>')

    # Need this for multiple things
    name = mwtemplate.get('name').value.strip()

    # Add inventory icon so it shows up correctly
    if not mwtemplate.has('inventory_icon'):
        mwtemplate.add('{0: <40}'.format('inventory_icon'), name)

    # Find translated item name
    console('Finding item name...')
    new = self.fuzzy_find_text(name, 'Words.dat', 'Text2',
                               source_list=self.words)
    if new is None:
        console('Didn\'t get an english name for this item, skipping.')
        return
    mwtemplate.get('name').value = ' %s\n' % new

    # Find the correct name of the base item
    console('Finding base item...')
    if mwtemplate.has('base_item_id'):
        # TODO
        pass
    elif mwtemplate.has('base_item_page'):
        # TODO
        pass
    elif mwtemplate.has('base_item'):
        base = self.fuzzy_find_text(
            mwtemplate.get('base_item').value,
            'BaseItemTypes.dat',
            'Name',
            source_list=self.cache[mwtemplate.get('class_id').value.strip()])
        if base is None:
            console('Base item is required for unique items. Skipping.')
            return
        mwtemplate.get('base_item').value = ' %s\n' % base

    if self.parsed_args.copy_upgraded_from:
        # TODO
        pass
    else:
        # Need to copy the list or it won't be deleted properly as it
        # deletes from itself during iteration
        for mwparam in list(mwtemplate.params):
            pname = mwparam.name.strip()
            if pname.startswith('upgraded_from') and \
                    pname != 'upgraded_from_disabled':
                mwtemplate.remove(mwparam.name)
            # Bug fix: "pname in ('class')" tested substring membership
            # in the string 'class' (also matching 'c', 'la', ...); a
            # one-element tuple is intended.
            elif pname in ('class',):
                mwtemplate.remove(mwparam.name)

    if mwtemplate.has('drop_text') and not parsed_args.ignore_drop_text:
        console('Drop text might need a translation. Current text:\n\n%s'
                % mwtemplate.get('drop_text').value.strip())
        text = input('\nNew text (leave empty to copy old):\n')
        if text:
            mwtemplate.get('drop_text').value = ' %s\n' % text

    if pn == name:
        page = self.site_other.pages[new]
    else:
        console(
            'Name of page doesn\'t equal item name. \nOld: %s\nItem:%s'
            % (pn, new))
        # Let the user pick a disambiguation suffix interactively.
        cont = True
        while cont:
            t = '%s (%s)' % (new, input('Enter phrase for parenthesis:\n'))
            console('Is this correct?:\n%s' % t)
            cont = input('y/n?\n') != 'y'
        page = self.site_other.pages[t]

    console('Saving to "%s" on other wiki...' % page.name)
    page.save('%s\n\n[[en:%s]]' % (str(mwtemplate), pn))
    console('Done.')
def handle_page(self, *a, row):
    """Pick a target wiki page from the row's candidates and edit it.

    Candidates are tried in order; the first existing page whose
    condition(s) pass — or the first missing page, unless
    ``only_existing`` is set — is edited with the row's text.

    Parameters
    ----------
    row : dict
        exporter result row with 'wiki_page', 'text', 'wiki_message'
    """
    # Normalize a bare page title into the candidate-list shape.
    if isinstance(row['wiki_page'], str):
        pages = [
            {'page': row['wiki_page'], 'condition': None},
        ]
    else:
        pages = row['wiki_page']
    console('Scanning for wiki page candidates "%s"' %
            ', '.join([p['page'] for p in pages]))
    page_found = False
    new = False
    for pdata in pages:
        page = self.site.pages[pdata['page']]
        if page.exists:
            condition = pdata.get('condition')
            success = True
            if condition is None:
                console(
                    'No conditions given - page content on "%s" will be '
                    'overriden' % pdata['page'],
                    msg=Msg.warning,
                )
                success = True
            elif callable(condition):
                success = condition(page=page)
            elif isinstance(condition, Iterable):
                # All conditions must pass; stop at the first failure.
                for cond in condition:
                    success = cond(page=page)
                    if not success:
                        break
            else:
                raise ValueError('Invalid condition type "%s"' %
                                 type(condition))
            if success:
                console('All conditions met on page "%s". Editing.' %
                        pdata['page'])
                page_found = True
                break
            else:
                console(
                    'One or more conditions failed on page "%s". Skipping.'
                    % pdata['page'],
                    msg=Msg.warning
                )
        elif self.cmdargs.only_existing:
            console(
                'Page "%s" does not exist. Bot is set to only write to '
                'existing pages, skipping.' % pdata['page'],
                msg=Msg.warning
            )
            return
        else:
            console('Page "%s" does not exist. It will be created.' %
                    pdata['page'])
            page_found = True
            new = True
            break
    if page_found:
        text = row['text']
        if callable(text):
            # Existing pages get passed to the text generator so it can
            # merge with the current content.
            kwargs = {}
            if not new:
                kwargs['page'] = page
            text = text(**kwargs)
        if text == page.text():
            console('No update required. Skipping.')
            return
        if self.cmdargs.dry_run:
            console(text)
        else:
            response = page.save(
                text=text,
                summary='PyPoE/ExporterBot/%s: %s' % (
                    __version__,
                    self.cmdargs.wiki_message or row['wiki_message']
                )
            )
            if response['result'] == 'Success':
                console('Page was edited successfully (time: %s)' %
                        response.get('newtimestamp'))
            else:
                #TODO: what happens if it fails?
                console('Something went wrong, status code:',
                        msg=Msg.error)
                console(response, msg=Msg.error)
    else:
        console(
            'No wiki page candidates found, skipping this row.',
            msg=Msg.error,
        )
def export(self, parsed_args, passives):
    """Build wiki export results for the given passive skill rows.

    Loads the passive skill graph to derive two-way node connections,
    collects stats (including buff-granted stats) and optionally
    extracts the skill icons.

    Returns
    -------
    ExporterResult
        one result entry per passive skill
    """
    r = ExporterResult()
    passives = self._apply_filter(parsed_args, passives)
    if not passives:
        console(
            'No passives found for the specified parameters. Quitting.',
            msg=Msg.warning,
        )
        return r
    console('Accessing additional data...')
    psg = PSGFile()
    psg.read(file_path_or_raw=self.file_system.get_file(
        'Metadata/PassiveSkillGraph.psg'),
    )
    node_index = {}
    for group in psg.groups:
        for node in group.nodes:
            node_index[node.passive_skill] = node
    # Connections are one-way, make them two way
    for psg_id, node in node_index.items():
        for other_psg_id in node.connections:
            node_index[other_psg_id].connections.append(psg_id)
    self.rr['PassiveSkills.dat'].build_index('PassiveSkillGraphId')
    self._image_init(parsed_args)
    console('Found %s, parsing...' % len(passives))
    for passive in passives:
        data = OrderedDict()
        for row_key, copy_data in self._COPY_KEYS.items():
            value = passive[row_key]
            condition = copy_data.get('condition')
            if condition is not None and not condition(passive):
                continue
            # Skip default values to reduce size of template
            if value == copy_data.get('default'):
                continue
            fmt = copy_data.get('format')
            if fmt:
                value = fmt(value)
            data[copy_data['template']] = value
        if passive['Icon_DDSFile']:
            # Derive a wiki icon name from the dds path; icons outside
            # the shared 'passives' folder get a disambiguating suffix.
            icon = passive['Icon_DDSFile'].split('/')
            if passive['Icon_DDSFile'].startswith(
                    'Art/2DArt/SkillIcons/passives/'):
                if icon[-2] == 'passives':
                    data['icon'] = icon[-1]
                else:
                    data['icon'] = '%s (%s)' % (icon[-1], icon[-2])
            else:
                data['icon'] = icon[-1]
            data['icon'] = data['icon'].replace('.dds', '')
        # Stat slots are positional; stop at the first unset index.
        stat_ids = []
        values = []
        j = 0
        for i in range(0, self._MAX_STAT_ID):
            try:
                stat = passive['StatsKeys'][i]
            except IndexError:
                break
            j = i + 1
            stat_ids.append(stat['Id'])
            data['stat%s_id' % j] = stat['Id']
            values.append(passive['Stat%sValue' % j])
            data['stat%s_value' % j] = passive['Stat%sValue' % j]
        data['stat_text'] = '<br>'.join(
            self._get_stats(
                stat_ids,
                values,
                translation_file='passive_skill_stat_descriptions.txt'))
        # For now this is being added to the stat text
        for ps_buff in passive['PassiveSkillBuffsKeys']:
            stat_ids = [
                stat['Id']
                for stat in ps_buff['BuffDefinitionsKey']['StatsKeys']
            ]
            values = ps_buff['Buff_StatValues']
            #if passive['Id'] == 'AscendancyChampion7':
            #    index = stat_ids.index('damage_taken_+%_from_hits')
            #    del stat_ids[index]
            #    del values[index]
            for i, (sid, val) in enumerate(zip(stat_ids, values)):
                j += 1
                data['stat%s_id' % j] = sid
                data['stat%s_value' % j] = val
            text = '<br>'.join(
                self._get_stats(
                    stat_ids,
                    values,
                    translation_file=
                    'passive_skill_aura_stat_descriptions.txt'))
            if data['stat_text']:
                data['stat_text'] += '<br>' + text
            else:
                data['stat_text'] = text
        node = node_index.get(passive['PassiveSkillGraphId'])
        if node and node.connections:
            data['connections'] = ','.join([
                self.rr['PassiveSkills.dat'].index['PassiveSkillGraphId']
                [psg_id]['Id'] for psg_id in node.connections
            ])
        # extract icons if specified
        if parsed_args.store_images:
            # NOTE(review): assumes data['icon'] was set above, i.e.
            # every stored-image passive has Icon_DDSFile — confirm.
            fn = data['icon'] + ' passive skill icon'
            dds = os.path.join(self._img_path, fn + '.dds')
            png = os.path.join(self._img_path, fn + '.png')
            if not (os.path.exists(dds) or os.path.exists(png)):
                self._write_dds(
                    data=self.file_system.get_file(
                        passive['Icon_DDSFile']),
                    out_path=dds,
                    parsed_args=parsed_args,
                )
        cond = WikiCondition(
            data=data,
            cmdargs=parsed_args,
        )
        r.add_result(
            text=cond,
            out_file='passive_skill_%s.txt' % data['id'],
            wiki_page=[
                {
                    'page': 'Passive Skill:'
                            + self._format_wiki_title(data['id']),
                    'condition': cond,
                },
            ],
            wiki_message='Passive skill updater',
        )
    return r
def wrapper(pargs, *args, **kwargs):
    """Run an exporter function with hash/path checks and output handling.

    Verifies the game-file hash and output paths, builds the parser,
    then either delegates to ``handler`` or runs ``func`` and prints,
    writes and/or wiki-publishes each result row per the CLI flags.

    Returns
    -------
    int
        0 on success, -1 on precondition failure
    """
    # Check Hash
    if not check_hash():
        console('Game file hash mismatch. Please rerun setup.',
                msg=Msg.error)
        return -1
    # Check outdir, if specified:
    if hasattr(pargs, 'outdir') and pargs.outdir:
        out_dir = pargs.outdir
    else:
        out_dir = config.get_option('out_dir')
    temp_dir = config.get_option('temp_dir')
    for item in (out_dir, temp_dir):
        if not os.path.exists(item):
            console('Path "%s" does not exist' % item, msg=Msg.error)
            return -1

    console('Reading .dat files...')
    parser = cls(base_path=temp_dir, parsed_args=pargs)

    console('Parsing...')
    if handler:
        return handler(parser, pargs, out_dir=out_dir)

    result = func(parser, pargs, *args, **kwargs)

    for item in result:
        text = item['text']() if callable(item['text']) else item['text']
        if pargs.print:
            console(text)

        if pargs.write:
            out_path = os.path.join(out_dir, item['out_file'])
            console('Writing data to "%s"...' % out_path)
            with open(out_path, 'w', encoding='utf-8') as f:
                f.write(text)

    if pargs.wiki:
        if mwclient is None:
            try:
                # Bug fix: this used to be __import__('') which raises
                # ValueError, so the ImportError advice below was
                # unreachable.
                __import__('mwclient')
            except ImportError:
                console('Run pip install -e cli', msg=Msg.error)
                # Bug fix: previously fell through with mwclient still
                # None and crashed inside wiki_handler.handle.
                return -1

        if wiki_handler is None:
            console('No wiki-handler defined for this function',
                    msg=Msg.error)
            return 0

        console('Running wikibot...')
        console('-'*80)
        wiki_handler.handle(mwclient=mwclient, result=result,
                            cmdargs=pargs, parser=parser)
        console('-'*80)
        console('Completed wikibot execution.')

    console('Done.')
    return 0
def _ver_dist_changed(self, key, value, old_value):
    """React to a changed version/distributor config value.

    Does nothing when the value is unchanged; otherwise flags the
    ``temp_dir`` setup step as not performed and warns the user.
    """
    changed = value != old_value
    if not changed:
        return
    config.set_setup_variable('temp_dir', 'performed', False)
    console(
        'Setup needs to be performed due to changes to "%s"' % key,
        msg=Msg.warning)