def run(args):
    enemy_skillset_dump.set_data_dir(args.output_dir)

    raw_input_dir = os.path.join(args.input_dir, 'raw')
    na_db = database.Database('na', raw_input_dir)
    na_db.load_database(skip_skills=True, skip_bonus=True, skip_extra=True)
    jp_db = database.Database('jp', raw_input_dir)
    jp_db.load_database(skip_skills=True, skip_bonus=True, skip_extra=True)

    combined_cards = merged_data.build_cross_server_cards(jp_db, na_db)

    fixed_card_id = args.card_id
    if args.interactive:
        fixed_card_id = input("enter a card id:").strip()

    count = 0
    for csc in combined_cards:
        merged_card = csc.na_card
        card = merged_card.card
        if fixed_card_id and card.card_id != int(fixed_card_id):
            continue
        try:
            count += 1
            if count % 100 == 0:
                print('processing {} of {}'.format(count, len(combined_cards)))
            process_card(merged_card)
        except Exception as ex:
            print('failed to process', card.name)
            print(ex)
            if 'unsupported operation' not in str(ex):
                import traceback
                traceback.print_exc()
def load_data(args):
    if args.logsql:
        logging.getLogger('database').setLevel(logging.DEBUG)
    dry_run = not args.doupdates

    input_dir = args.input_dir
    output_dir = args.output_dir

    logger.info('Loading data')
    jp_database = database.Database('jp', input_dir)
    jp_database.load_database()
    na_database = database.Database('na', input_dir)
    na_database.load_database()

    if not args.skipintermediate:
        logger.info('Storing intermediate data')
        calc_skills = skill_info.reformat_json_info(jp_database.raw_skills)
        jp_database.calc_skills = calc_skills
        jp_database.save_all(output_dir, args.pretty)
        na_database.save_all(output_dir, args.pretty)

    logger.info('Connecting to database')
    with open(args.db_config) as f:
        db_config = json.load(f)
    db_wrapper = DbWrapper(dry_run)
    db_wrapper.connect(db_config)

    cross_server_dungeons = merged_data.build_cross_server_dungeons(jp_database, na_database)

    logger.info('Starting JP event diff')
    database_diff_events(db_wrapper, jp_database, cross_server_dungeons)

    logger.info('Starting NA event diff')
    database_diff_events(db_wrapper, na_database, cross_server_dungeons)

    logger.info('Starting card diff')
    database_diff_cards(db_wrapper, jp_database, na_database)

    logger.info('Starting egg machine update')
    try:
        database_update_egg_machines(db_wrapper, jp_database, na_database)
    except Exception as ex:
        print('updating egg machines failed', str(ex))

    logger.info('Starting news update')
    try:
        database_update_news(db_wrapper)
    except Exception as ex:
        print('updating news failed', str(ex))

    logger.info('Starting tstamp update')
    timestamp_processor.update_timestamps(db_wrapper)

    print('done')
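# Illustrative sketch (not the repo's actual parser): an argparse setup covering only the
# flags that load_data() reads above. The flag names mirror the args.* attributes used in
# the function; the defaults and help text are assumptions.
import argparse


def parse_load_data_args():
    parser = argparse.ArgumentParser(description='Diff raw PAD data against the SQL database.')
    parser.add_argument('--input_dir', required=True,
                        help='Directory containing the raw JP/NA server data.')
    parser.add_argument('--output_dir', required=True,
                        help='Directory to write intermediate processed data to.')
    parser.add_argument('--db_config', required=True,
                        help='Path to a JSON file with database connection parameters.')
    parser.add_argument('--doupdates', action='store_true',
                        help='Apply changes; without this flag the run is a dry run.')
    parser.add_argument('--logsql', action='store_true',
                        help='Log SQL statements at DEBUG level.')
    parser.add_argument('--skipintermediate', action='store_true',
                        help='Skip saving the intermediate JSON data.')
    parser.add_argument('--pretty', action='store_true',
                        help='Pretty-print the intermediate JSON output.')
    return parser.parse_args()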
def run_test(args):
    raw_input_dir = os.path.join(args.input_dir, 'raw')
    processed_input_dir = os.path.join(args.input_dir, 'processed')

    output_dir = args.output_dir
    new_output_dir = os.path.join(output_dir, 'new')
    pathlib.Path(new_output_dir).mkdir(parents=True, exist_ok=True)
    golden_output_dir = os.path.join(output_dir, 'golden')
    pathlib.Path(golden_output_dir).mkdir(parents=True, exist_ok=True)

    db = database.Database('na', raw_input_dir)
    print('loading')
    db.load_database(skip_skills=True, skip_bonus=True, skip_extra=True)

    dungeon_id_to_wavedata = defaultdict(set)
    wave_summary_data = wave.load_wave_summary(processed_input_dir)
    for w in wave_summary_data:
        dungeon_id_to_wavedata[w.dungeon_id].add(w)

    split_dungeons = [
        # Marks dungeons which are enormous and should be broken into subfiles.
        110,  # Endless Corridors
    ]

    golden_dungeons = [
        307,  # Hera-Is
        318,  # Zeus-Dios
        317,  # ECO
        331,  # Hera-Ur
    ]

    for dungeon_id, wave_data in dungeon_id_to_wavedata.items():
        dungeon = db.dungeon_by_id(dungeon_id)
        if not dungeon:
            print('skipping', dungeon_id)
            continue
        print('processing', dungeon_id, dungeon.clean_name)

        file_output_dir = golden_output_dir if dungeon_id in golden_dungeons else new_output_dir

        if dungeon_id in split_dungeons:
            # Disable Endless for now; it takes a long time to run.
            continue
            for floor in dungeon.floors:
                floor_id = floor.floor_number
                file_name = '{}_{}.txt'.format(dungeon_id, floor_id)
                with open(os.path.join(file_output_dir, file_name), encoding='utf-8', mode='w') as f:
                    f.write(flatten_data(wave_data, dungeon, db, limit_floor_id=floor_id))
        else:
            file_name = '{}.txt'.format(dungeon_id)
            with open(os.path.join(file_output_dir, file_name), encoding='utf-8', mode='w') as f:
                f.write(flatten_data(wave_data, dungeon, db))
def run_dump(args):
    esd.set_data_dir(args.es_input_dir)
    monster_id = int(args.monster_id)

    db = database.Database('na', args.raw_input_dir)
    db.load_database(skip_skills=True, skip_bonus=True, skip_extra=True)

    card = db.raw_card_by_id(monster_id)
    summary = esd.load_summary(monster_id)

    info = yaml.dump(summary.info, default_flow_style=False, allow_unicode=True)

    levels = {}
    for listing in summary.data:
        level_data = {
            'raw': yaml.dump(listing, default_flow_style=False, allow_unicode=True),
            'processed': esd.summary_as_dump_text(summary, card, listing.level, 1),
        }
        levels[listing.level] = level_data

    output = {
        'info': info,
        'levels': levels,
    }

    print(json.dumps(output, indent=2, sort_keys=True))
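# Shape of the JSON printed by run_dump(), derived from the code above; the angle-bracket
# values are placeholders, not real data:
#
#   {
#     "info": "<YAML dump of summary.info>",
#     "levels": {
#       "<enemy level>": {
#         "raw": "<YAML dump of the raw listing>",
#         "processed": "<human-readable enemy skillset dump text>"
#       }
#     }
#   }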
def run(args):
    raw_input_dir = os.path.join(args.input_dir, 'raw')
    db = database.Database('na', raw_input_dir)
    db.load_database(skip_skills=True, skip_bonus=True, skip_extra=True)

    count = 0
    for card in db.cards:
        if args.card_id and card.card.card_id != int(args.card_id):
            continue
        try:
            count += 1
            if count % 50 == 0:
                print('processing {} of {}'.format(count, len(db.cards)))
            process_card(card)
        except Exception as ex:
            print('failed to process', card.card.name)
            print(ex)
            if 'unsupported operation' not in str(ex):
                import traceback
                traceback.print_exc()
def run_test(args):
    input_dir = args.input_dir
    output_dir = args.output_dir

    new_output_dir = os.path.join(output_dir, 'new')
    pathlib.Path(new_output_dir).mkdir(parents=True, exist_ok=True)
    golden_output_dir = os.path.join(output_dir, 'golden')
    pathlib.Path(golden_output_dir).mkdir(parents=True, exist_ok=True)

    for server in ['na', 'jp']:
        print('starting {} checks'.format(server))
        db = database.Database(server, input_dir)

        print('loading')
        db.load_database()

        print('saving')
        db.save_all(new_output_dir, True)

        print('diffing')
        files = {
            '{}_raw_cards.json'.format(server): db.raw_cards,
            '{}_dungeons.json'.format(server): db.dungeons,
            '{}_skills.json'.format(server): db.skills,
            '{}_enemy_skills.json'.format(server): db.enemy_skills,
            '{}_bonuses.json'.format(server): db.bonuses,
            '{}_cards.json'.format(server): db.cards,
            '{}_exchange.json'.format(server): db.exchange,
            '{}_leader_skills.json'.format(server): db.leader_skills,
            '{}_active_skills.json'.format(server): db.active_skills,
        }

        for file, data in files.items():
            new_file = os.path.join(new_output_dir, file)
            golden_file = os.path.join(golden_output_dir, file)
            if not os.path.exists(golden_file):
                print('golden file does not exist, creating', golden_file)
                shutil.copy(new_file, golden_file)
                continue

            with open(golden_file) as f:
                golden_data = json.load(f)

            if len(golden_data) != len(data):
                print('ERROR')
                print('ERROR: file lengths differed, indicates old golden data for', file)
                print('ERROR')
                continue

            failures = []
            for i in range(len(golden_data)):
                gold_row = golden_data[i]
                new_row = data[i]

                gold_str = json.dumps(gold_row, indent=4, sort_keys=True, default=dump_helper)
                new_str = json.dumps(new_row, indent=4, sort_keys=True, default=dump_helper)

                if gold_str != new_str:
                    failures.append([gold_str, new_str])

            if not failures:
                continue

            fail_count = len(failures)
            disp_count = min(fail_count, 3)
            print('encountered', fail_count, 'errors, displaying the first', disp_count)

            for i in range(disp_count):
                gold_str = failures[i][0]
                new_str = failures[i][1]

                id_text = '\n'.join(filter(lambda x: '_id' in x, gold_str.split('\n')))
                print('row identifiers:\n{}\n'.format(id_text))

                diff_lines = difflib.context_diff(
                    gold_str.split('\n'), new_str.split('\n'),
                    fromfile='golden', tofile='new', n=1)
                print('\n'.join(diff_lines))
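# The golden diff above serializes rows with json.dumps(..., default=dump_helper).
# A minimal sketch of such a helper, assuming the pad data objects expose their fields
# via __dict__; the repo's real implementation may differ.
def dump_helper(obj):
    if hasattr(obj, '__dict__'):
        return obj.__dict__
    return str(obj)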
            op=int)
    elif args.dungeon_seq:
        dungeon_seq = int(args.dungeon_seq)
        pad_dungeon_id = db_wrapper.get_single_value(
            "select pad_dungeon_id from etl_dungeon_map where dungeon_seq = {}".format(dungeon_seq),
            op=int)
    else:
        raise Exception('must specify pad_dungeon_id or dungeon_seq')

    loader = dungeon.DungeonLoader(db_wrapper)
    print(dungeon_seq, pad_dungeon_id)
    dungeon = loader.load_dungeon(dungeon_seq)

    jp_database = database.Database('jp', args.raw_input_dir)
    jp_database.load_database()
    na_database = database.Database('na', args.raw_input_dir)
    na_database.load_database()

    jp_data = jp_database.dungeons
    na_data = na_database.dungeons

    jp_dungeon = None
    na_dungeon = None

    for d in jp_data:
        if d.dungeon_id == pad_dungeon_id:
            jp_dungeon = d
            break
    return parser.parse_args()


args = parse_args()

processed_dir = '/home/tactical0retreat/pad_data/processed'
bonuses_file = '{}/{}_bonuses.json'.format(processed_dir, args.server)

with open(bonuses_file) as f:
    bonuses = json.load(f)

current_dungeons = active_dungeons.filter_current_bonuses(
    bonuses, args.group, include_normals=False, include_multiplayer=True)

raw_dir = '/home/tactical0retreat/pad_data/raw'
pad_db = database.Database(args.server, raw_dir)
pad_db.load_database()

for dungeon in pad_db.dungeons:
    # This is a hack because I don't want to overhaul this whole script, which is deprecated.
    if dungeon.one_time:
        continue
    if dungeon.alt_dungeon_type not in ['Normal Dungeon', 'Technical Dungeon']:
        continue

    dungeon_map = {
        'dungeon_id': dungeon.dungeon_id,
        'clean_name': dungeon.clean_name,
        'floors': [{'floor_number': x.floor_number} for x in dungeon.floors],