def update_kinsects(mhdata, item_updater: ItemUpdater):
    """Merge kinsect binary data into a fresh kinsect data map and write CSVs.

    Also emits debug artifacts listing every craftable kinsect (with its
    crafting parent) and every kinsect tree root.
    """
    print('Loading kinsect info')
    kinsect_tree = load_kinsect_tree()

    def parent_name_en(node):
        # Tree roots have no parent; represent that as an empty string.
        return node.parent.name['en'] if node.parent else ''

    # Debug artifacts: all crafted kinsects, then the tree roots.
    crafted_lines = [
        f"{k.id},{k.name['en']},{parent_name_en(k)}"
        for k in kinsect_tree.crafted()
    ]
    artifacts.write_artifact('kinsect_all.txt', *crafted_lines)

    root_lines = [f"{k.id},{k.name['en']}" for k in kinsect_tree.roots]
    artifacts.write_artifact('kinsect_roots.txt', *root_lines)

    kinsect_map = DataMap(languages=['en'])
    for node in kinsect_tree.crafted():
        binary = node.binary
        entry = kinsect_map.insert({
            'id': binary.id + 1,
            'name': node.name,
            'previous_en': parent_name_en(node),
            'rarity': binary.rarity + 1,
            'attack_type': kinsect_attack_types[binary.attack_type],
            'dust_effect': kinsect_dusts[binary.dust_type],
            'power': binary.power,
            'speed': binary.speed,
            'heal': binary.heal
        })
        if node.upgrade:
            entry['craft'] = convert_recipe(item_updater, node.upgrade)

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "weapons/kinsect_base.csv",
        kinsect_map,
        schema=schema.KinsectBaseSchema(),
        translation_filename="weapons/kinsect_base_translations.csv")
    writer.save_data_csv(
        "weapons/kinsect_craft_ext.csv",
        kinsect_map,
        key="craft",
        schema=schema.RecipeSchema())

    print("Kinsect files updated\n")
def update_monsters(mhdata):
    """Bind game text (names and descriptions) onto existing monster entries.

    Uses monster_map.csv (next to this script) to translate our english names
    into the game's text keys, then writes the monster base CSVs.
    """
    monster_keys = read_csv(dirname(abspath(__file__)) + '/monster_map.csv')
    monster_keys = dict((r['name_en'], r) for r in monster_keys)

    monster_name_text = load_text('common/text/em_names')
    monster_info_text = load_text('common/text/em_info')

    for monster_entry in mhdata.monster_map.values():
        name_en = monster_entry.name('en')
        if name_en not in monster_keys:
            print(f'Warning: {name_en} not mapped, skipping')
            # BUG FIX: the original fell through after this warning, so the
            # lookup below raised KeyError instead of actually skipping.
            continue

        monster_key_entry = monster_keys[name_en]
        key = monster_key_entry['key']
        # Some monsters use a different key for their info/description text.
        info_key = monster_key_entry['key_info_override'] or key

        monster_entry['name'] = monster_name_text[key]
        if info_key != 'NONE':
            monster_entry['description'] = monster_info_text[f'NOTE_{info_key}_DESC']

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "monsters/monster_base.csv",
        mhdata.monster_map,
        schema=schema.MonsterBaseSchema(),
        translation_filename="monsters/monster_base_translations.csv",
        translation_extra=['description']
    )

    print("Monsters updated\n")
def update_tools(mhdata):
    """Merge binary specialized-tool data into the tool map and write the CSVs."""
    tool_data = ToolCollection()
    merged_tools = DataMap(start_id=mhdata.tool_map.max_id + 1)

    for tool in tool_data.tools:
        upgraded_name_en = tool.name_upgraded['en']
        existing = mhdata.tool_map.entry_of('en', upgraded_name_en)

        # Start from the existing row (if any) so curated fields survive.
        entry = {**existing} if existing else {}

        entry['name'] = tool.name_upgraded
        entry['name_base'] = tool.name
        entry['description'] = tool.description

        # Tool type is inferred from the english base name.
        is_booster = 'booster' in tool.name['en'].lower()
        entry['tool_type'] = 'booster' if is_booster else 'mantle'

        # Only seed these when the key is absent from the existing row.
        entry.setdefault('duration', 0)
        entry.setdefault('duration_upgraded', None)
        entry.setdefault('recharge', 0)

        entry['slot_1'] = tool.slots[0]
        entry['slot_2'] = tool.slots[1]
        entry['slot_3'] = tool.slots[2]
        entry.setdefault('icon_color', None)

        merged_tools.insert(entry)

    writer = create_writer()
    writer.save_base_map_csv(
        "tools/tool_base.csv",
        merged_tools,
        schema=schema.ToolSchema(),
        translation_filename="tools/tool_base_translations.csv",
        translation_extra=['name_base', 'description'])
def update_quests(mhdata, item_updater: ItemUpdater, monster_meta: MonsterMetadata, area_map):
    """Load quest binary data, warn about exact duplicates, merge into the
    quest map, and write the quest CSV files (plus raw artifacts)."""
    print('Beginning load of quest binary data')
    quests = load_quests()
    print('Loaded quest binary data')

    quest_data = [
        get_quest_data(q, item_updater, monster_meta, area_map)
        for q in quests
    ]
    quest_by_id = {q.id: q for q in quests}
    quest_data_by_id = {q['id']: q for q in quest_data}

    # test for duplicates first.
    for (first, second) in get_quests_with_duplicate_names(quest_by_id):
        if compare_quest_data(quest_data_by_id[first.id], quest_data_by_id[second.id]):
            quest_name = first.name['en']
            print(f'Warning: Quest {quest_name} has exact duplicates.')

    write_quest_raw_data(quests, item_updater, monster_meta)
    print('Quest artifacts written. Copy ids and names to quest_base.csv to add to build')

    # Merge the quest data into entries that already exist in the map.
    for raw in quest_data:
        entry = mhdata.quest_map.get(raw['id'])
        if entry:
            entry.update(raw)
    print('Quests merged')

    writer = create_writer()
    writer.save_base_map_csv(
        "quests/quest_base.csv",
        mhdata.quest_map,
        translation_filename="quests/quest_base_translations.csv",
        translation_extra=['objective', 'description'],
        schema=schema.QuestBaseSchema(),
        key_join='id')
    writer.save_data_csv('quests/quest_monsters.csv', mhdata.quest_map, key='monsters', key_join='id')
    writer.save_data_csv('quests/quest_rewards.csv', mhdata.quest_map, key='rewards', key_join='id')
    print('Quest files updated\n')
def update_decorations(mhdata, item_data: ItemCollection):
    """Merge binary decoration data (name, rarity, skills) into the
    decoration map and write the decoration CSVs."""
    print("Updating decorations")
    data = DecorationCollection(item_data)
    skill_handler = SkillTextHandler()

    # write artifact file (used to debug)
    artifacts.write_dicts_artifact(
        "decorations_all.csv",
        [{'name': d.name['en'], 'slot': d.size, 'rarity': d.rarity}
         for d in data.decorations])

    for entry in mhdata.decoration_map.values():
        deco_name = entry['name_en']
        try:
            deco = data.by_name(entry['name_en'])
        except KeyError:
            print(f"Could not find decoration {deco_name} in the game files")
            continue

        entry['name'] = deco.name
        entry['rarity'] = deco.rarity

        # Decorations carry up to two skills; blank out any missing slot.
        for slot_idx in (1, 2):
            name_val = None
            level_val = None
            if slot_idx <= len(deco.skills):
                (tree_id, level_val) = deco.skills[slot_idx - 1]
                name_val = skill_handler.get_skilltree_name(tree_id)['en']
            entry[f'skill{slot_idx}_name'] = name_val
            entry[f'skill{slot_idx}_level'] = level_val

    writer = create_writer()
    writer.save_base_map_csv(
        "decorations/decoration_base.csv",
        mhdata.decoration_map,
        schema=schema.DecorationBaseSchema(),
        translation_filename="decorations/decoration_base_translations.csv")

    print("Decoration files updated\n")
def translate_skills(mhdata):
    """Replace skill names/descriptions (and per-level descriptions) with the
    game-text translations, then write the skill CSVs."""
    print("Translating skills")
    handler = SkillTextHandler()

    for skill in mhdata.skill_map.values():
        skill_name = skill['name']['en']

        try:
            (new_name, new_description) = handler.get_skilltree_translation(skill_name)
        except KeyError:
            print(f"Could not find skill {skill_name} in the game files")
            continue
        skill['name'] = new_name
        skill['description'] = new_description

        for level in skill['levels']:
            current_description = level['description']['en']
            try:
                level['description'] = handler.get_skill_description_translation(current_description)
            except KeyError:
                print(f"Failed to find description translations for skill {skill_name} level {level['level']}")

    writer = create_writer()
    writer.save_base_map_csv(
        "skills/skill_base.csv",
        mhdata.skill_map,
        schema=schema.SkillBaseSchema(),
        translation_filename="skills/skill_base_translations.csv",
        translation_extra=['description']
    )
    writer.save_data_csv(
        "skills/skill_levels.csv",
        mhdata.skill_map,
        key='levels',
        schema=schema.SkillLevelSchema()
    )

    print("Skill files updated\n")
import requests

from mhdata.io import create_writer
from mhdata.load import load_data, schema

writer = create_writer()

# note: inc means incoming


def merge_weapons():
    """Diff the local weapon map against the mhw-db.com weapon API payload.

    Fetches the remote weapon list and compares it to our weapon data,
    collecting mismatch buckets for reporting.

    NOTE(review): the loop body appears truncated at this point in the file;
    confirm the rest of the comparison logic against the complete source.
    """
    # Raw JSON list of weapons from the remote API (untrusted external input).
    inc_data = requests.get("https://mhw-db.com/weapons").json()
    data = load_data().weapon_map

    # Buckets for diff results.
    not_exist = []
    mismatches_atk = []
    mismatches_def = []
    mismatches_other = []

    def print_all(items):
        # Print each collected line, followed by one blank separator line.
        for item in items:
            print(item)
        print()

    for weapon_inc in inc_data:
        inc_id = weapon_inc['id']
        inc_type = weapon_inc['type']
        name = weapon_inc['name']
        inc_label = f"{name} ({inc_type})"

        # Our system uses I/II/III, theirs uses 1/2/3
        if name not in data.names('en'):
            name = name.replace(" 3", " III")
def update_items(item_updater: ItemUpdater, *, mhdata=None):
    """Rebuild the item map from the in-game item binary data.

    Three passes: (1) merge in-game items with existing rows, categorizing
    each item; (2) carry over old rows that no longer match an in-game item;
    (3) reorder everything by category/subcategory. Finally writes the item
    CSVs plus debug artifacts of unlinked item names and all item ids.
    """
    if not mhdata:
        mhdata = load_data()
        print("Existing Data loaded. Using to expand item list")

    new_item_map = DataMap(languages='en', start_id=mhdata.item_map.max_id + 1)
    # Items seen in game data but never encountered nor present in our data.
    unlinked_item_names = OrderedSet()

    # used to track dupes to throw proper errors
    updated_names = set()

    # First pass. Iterate over existing ingame items and merge with existing data
    for entry in item_updater.item_data:
        name_dict, description_dict = item_updater.name_and_description_for(
            entry.id, track=False)
        existing_item = mhdata.item_map.entry_of('en', name_dict['en'])

        # Skip (but record) items we never encountered and don't already track.
        is_encountered = entry.id in item_updater.encountered_item_ids
        if not is_encountered and not existing_item:
            unlinked_item_names.add(name_dict['en'])
            continue

        if name_dict['en'] in updated_names:
            raise Exception(f"Duplicate item {name_dict['en']}")
        updated_names.add(name_dict['en'])

        # note: we omit buy price as items may have a buy price even if not sold.
        # We only care about the buy price of BUYABLE items
        new_data = {
            'name': name_dict,
            'description': description_dict,
            'rarity': entry.rarity + 1,
            'sell_price': None,
            'points': None
        }

        is_ez = entry.flags.ez
        is_account = entry.type == 'endemic'
        is_tradein = "(Trade-in Item)" in description_dict['en']
        is_appraisal = entry.flags.appraisal

        # Endemic life is valued in points instead of zenny.
        sell_value = entry.sell_price if entry.sell_price != 0 else None
        if is_account:
            new_data['points'] = sell_value
        else:
            new_data['sell_price'] = sell_value

        # Categorize the item; order of these branches matters.
        if name_dict['en'] == 'Normal Ammo 1':
            new_data['category'] = 'hidden'
        elif is_ez:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'supply'
        elif is_account:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'account'
        elif is_appraisal or ('Appraised after investigation' in description_dict['en']):
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'appraisal'
            new_data['sell_price'] = None  # why does this have values?
        else:
            new_data['category'] = entry.type
            new_data['subcategory'] = 'trade' if is_tradein else None

        # Whether we show carry limit at all is based on item type.
        # Materials are basically infinite carry
        infinite_carry = new_data['category'] == 'material'
        new_data['carry_limit'] = None if infinite_carry else entry.carry_limit

        if existing_item:
            new_item_map.insert({**existing_item, **new_data})
        else:
            new_item_map.insert(new_data)

    # Second pass, add old entries that are not in the new one
    for old_entry in mhdata.item_map.values():
        if old_entry.name('en') not in new_item_map.names('en'):
            new_item_map.insert(old_entry)

    # Third pass. Items need to be reordered based on type
    unsorted_item_map = new_item_map  # store reference to former map

    def filter_category(category, subcategory=None):
        "helper that returns items and then removes from unsorted item map"
        results = []
        for item in unsorted_item_map.values():
            if item['category'] == category and item['subcategory'] == subcategory:
                results.append(item)
        for result in results:
            del unsorted_item_map[result.id]
        return results

    # Normal Ammo 1 ('hidden') is inserted by hand between materials and ammo.
    normal_ammo_1 = unsorted_item_map.entry_of("en", "Normal Ammo 1")

    # start the before-mentioned third pass by creating a new map based off the old one
    new_item_map = DataMap(languages="en")
    new_item_map.extend(filter_category('item'))
    new_item_map.extend(filter_category('material'))
    new_item_map.extend(filter_category('material', 'trade'))
    if normal_ammo_1:
        new_item_map.insert(normal_ammo_1)
    new_item_map.extend(filter_category('ammo'))
    new_item_map.extend(filter_category('misc', 'appraisal'))
    new_item_map.extend(filter_category('misc', 'account'))
    new_item_map.extend(filter_category('misc', 'supply'))

    # Write out data
    writer = create_writer()
    writer.save_base_map_csv(
        "items/item_base.csv",
        new_item_map,
        schema=schema.ItemSchema(),
        translation_filename="items/item_base_translations.csv",
        translation_extra=['description'])

    # Write out artifact data
    print("Writing unlinked item names to artifacts")
    artifacts.write_names_artifact('items_unlinked.txt', unlinked_item_names)

    print("Writing all items and ids")
    artifact_data = [{
        'id': i.id,
        'name': i.name['en']
    } for i in item_updater.data]
    artifacts.write_dicts_artifact('items_ids.csv', artifact_data)

    print("Item files updated")
def update_monsters(mhdata, item_data: ItemCollection, monster_data: MonsterCollection):
    """Merge monster binary data (hitzones, breaks, statuses, drops) into the
    monster map, validate the existing drop/hitzone rows against the binary
    data, and write the monster base CSVs.

    Several debug artifacts are also written; artifacts are not auto-merged.
    """
    root = Path(get_chunk_root())  # NOTE(review): unused here; kept as-is

    # Load hitzone entries. EPG files contain hitzones, parts, and base hp
    print('Loading monster hitzone data')
    if monster_data.load_epg_eda():
        print('Loaded Monster epg data (hitzones and breaks)')

    # Load status entries
    monster_statuses = read_status(monster_data)
    print('Loaded Monster status data')

    # Write hitzone data to artifacts
    hitzone_raw_data = [{
        'name': m.name['en'],
        **struct_to_json(m.epg)
    } for m in monster_data.monsters if m.epg is not None]
    artifacts.write_json_artifact("monster_hitzones_and_breaks.json", hitzone_raw_data)
    print("Monster hitzones+breaks raw data artifact written (Automerging not supported)")

    write_hitzone_artifacts(monster_data)
    print("Monster hitzones artifact written (Automerging not supported)")

    artifacts.write_json_artifact("monster_status.json", list(monster_statuses.values()))
    print("Monster status artifact written (Automerging not supported)")

    monster_drops = read_drops(monster_data, item_data)
    print('Loaded Monster drop rates')

    for monster_entry in mhdata.monster_map.values():
        name_en = monster_entry.name('en')
        try:
            monster = monster_data.by_name(name_en)
        except KeyError:
            print(f'Warning: Monster {name_en} not in metadata, skipping')
            continue

        monster_entry['name'] = monster.name
        if monster.description:
            monster_entry['description'] = monster.description

        # Compare drops (use the hunting notes key name if available)
        drop_tables = monster_drops.get(monster.id, None)
        if drop_tables:
            # Write drops to artifact files
            joined_drops = []
            for idx, drop_table in enumerate(drop_tables):
                joined_drops.extend({
                    'group': f'Group {idx+1}',
                    **e
                } for e in drop_table)
            artifacts.write_dicts_artifact(
                f'monster_drops/{name_en} drops.csv', joined_drops)

            # Check if any drop table in our current database is invalid
            if 'rewards' in monster_entry:
                # BUG FIX: itertools.groupby only groups *adjacent* rows, so
                # the sort key must match the group key. Previously this
                # sorted by condition_en alone while grouping on
                # (condition_en, rank), which could split one (condition,
                # rank) pair into several groups when ranks interleave and
                # report spurious validation errors.
                group_key = itemgetter('condition_en', 'rank')
                rewards_sorted = sorted(monster_entry['rewards'], key=group_key)
                rewards = itertools.groupby(rewards_sorted, key=group_key)
                for (condition, rank), existing_table in rewards:
                    if condition in itlot_conditions:
                        existing_table = list(existing_table)
                        if not any(
                                compare_drop_tables(existing_table, table)
                                for table in drop_tables):
                            print(
                                f"Validation Error: Monster {name_en} has invalid drop table {condition} in {rank}"
                            )
        else:
            print(f'Warning: no drops file found for monster {name_en}')

        # Compare hitzones
        hitzone_data = monster.hitzones
        if hitzone_data and 'hitzones' in monster_entry:
            # Create tuples of the values of the hitzone, to use as a comparator
            hitzone_key = lambda h: tuple(h[v] for v in hitzone_fields)
            stored_hitzones = [hitzone_key(h) for h in hitzone_data]
            stored_hitzones_set = set(stored_hitzones)

            # Check if any hitzone we have doesn't actually exist
            for hitzone in monster_entry['hitzones']:
                if hitzone_key(hitzone) not in stored_hitzones_set:
                    print(
                        f"Validation Error: Monster {name_en} has invalid hitzone {hitzone['hitzone']['en']}"
                    )
        elif 'hitzones' not in monster_entry and hitzone_data:
            print(f'Warning: no hitzones in monster entry {name_en}, but binary data exists')
        elif 'hitzones' in monster_entry:
            print(f'Warning: hitzones exist in monster {name_en}, but no binary data exists to compare')
        else:
            print(f"Warning: No hitzone data for monster {name_en}")

        # Status info: a trap works when both buildup and decrease are positive.
        status = monster_statuses.get(monster.id, None)
        if status:
            test = lambda v: v['base'] > 0 and v['decrease'] > 0
            monster_entry['pitfall_trap'] = test(status['pitfall_trap_buildup'])
            monster_entry['shock_trap'] = test(status['shock_trap_buildup'])
            monster_entry['vine_trap'] = test(status['vine_trap_buildup'])

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "monsters/monster_base.csv",
        mhdata.monster_map,
        schema=schema.MonsterBaseSchema(),
        translation_filename="monsters/monster_base_translations.csv",
        translation_extra=['description'])

    print("Monsters updated\n")
def update_weapon_songs(mhdata):
    """Merge hunting horn melody binary data (names, effects, notes, and
    melody durations) into the weapon melody map and write the melody CSVs.
    """
    # unfortunately, song data linking is unknown, but we have a few pieces
    # We know where the text file is, and we know of the id -> notes linking.
    print("Beginning load of hunting horn melodies")
    song_data = WeaponMelodyCollection()

    print("Writing artifact files for melody english text entries")
    artifacts.write_names_artifact("melody_strings_en.txt",
                                   [v.name['en'] for v in song_data])

    # adding NA to melody_names_map
    # Fallback effect text for melodies with no second effect.
    effect_none = { lang:'N/A' for lang in cfg.all_languages }

    # Create artifact of song data melody id -> all possible songs that apply that melody
    melody_note_artifacts = []
    for song in song_data:
        if not song.notes:
            melody_note_artifacts.append(f'{song.id},{song.name["en"]},NONE')
        for notes in song.notes:
            melody_note_artifacts.append(f'{song.id},{song.name["en"]},{notes}')
    print("Writing artifact files HH melodies")
    artifacts.write_lines_artifact("melody_notes.txt", melody_note_artifacts)

    print("Merging text values and notes into weapon melodies")
    # NOTE(review): `fields` is unused in this function; kept as-is.
    fields = ['name', 'effect1', 'effect2']
    for melody in mhdata.weapon_melodies.values():
        try:
            data = song_data.by_name(melody['name_en'])
        except KeyError:
            print("Could not find binary music notes entries for " + melody['name']['en'])
            continue

        melody['name'] = data.name
        melody['effect1'] = data.effect1
        melody['effect2'] = data.effect2 or effect_none
        melody['notes'] = [{'notes': notes} for notes in data.notes]

        # Convert a melody-length struct to a dict, mapping falsy values to None.
        melody_length_convert = lambda mlength: {
            'duration': mlength.duration or None,
            'extension': mlength.extension or None
        }
        melody['base'] = melody_length_convert(data.base)
        melody['m1'] = melody_length_convert(data.maestro1)
        melody['m2'] = melody_length_convert(data.maestro2)

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "weapons/weapon_melody_base.csv",
        mhdata.weapon_melodies,
        translation_filename="weapons/weapon_melody_base_translations.csv",
        translation_extra=['effect1', 'effect2'],
        schema=schema.WeaponMelodyBaseSchema()
    )
    writer.save_data_csv(
        "weapons/weapon_melody_notes.csv",
        mhdata.weapon_melodies,
        key='notes'
    )

    print("Weapon Melody files updated\n")
def update_weapons(mhdata, item_updater: ItemUpdater):
    """Merge weapon binary data into the weapon map and write all weapon CSVs.

    Loads the per-type weapon trees, Kulve augment data, sharpness, ammo and
    bow coating tables, then rebuilds every existing weapon entry from the
    binary data (keeping the spreadsheet's row order).
    """
    skill_text_handler = SkillTextHandler()

    print("Beginning load of binary weapon data")
    weapon_loader = WeaponDataLoader()
    notes_data = load_schema(wep_wsl.WepWsl, "common/equip/wep_whistle.wep_wsl")
    sharpness_reader = SharpnessDataReader()
    ammo_reader = WeaponAmmoLoader()
    coating_data = load_schema(bbtbl.Bbtbl, "common/equip/bottle_table.bbtbl")
    print("Loaded weapon binary data")

    def bind_weapon_blade_ext(weapon_type: str, existing_entry, weapon):
        # Populate melee-only fields (phials, shelling, kinsect, HH notes).
        binary: wp_dat.WpDatEntry = weapon.binary
        for key in [
                'kinsect_bonus', 'phial', 'phial_power', 'shelling',
                'shelling_level', 'notes'
        ]:
            existing_entry[key] = None
        if weapon_type == cfg.CHARGE_BLADE:
            existing_entry['phial'] = cb_phials[binary.wep1_id]
        if weapon_type == cfg.SWITCH_AXE:
            try:
                (phial, power) = s_axe_phials[binary.wep1_id]
                existing_entry['phial'] = phial
                existing_entry['phial_power'] = power
            except:
                raise KeyError(f"Failed to load saxe phials for {weapon.name['en']} (SAXE ID: {binary.wep1_id})")
        if weapon_type == cfg.GUNLANCE:
            # first 5 are normals, second 5 are wide, third 5 are long
            if binary.wep1_id >= 15:
                # ids >= 15 interleave the three types at levels 6 and up
                value = binary.wep1_id - 15
                shelling = ['normal', 'wide', 'long'][value % 3]
                level = value // 3 + 6
            else:
                shelling = ['normal', 'wide', 'long'][binary.wep1_id // 5]
                level = (binary.wep1_id % 5) + 1
            existing_entry['shelling'] = shelling
            existing_entry['shelling_level'] = level
        if weapon_type == cfg.INSECT_GLAIVE:
            try:
                existing_entry['kinsect_bonus'] = glaive_boosts[binary.wep1_id]
            except:
                raise KeyError(f"Failed to load kinsect bonus for {weapon.name['en']} (BOOST ID: {binary.wep1_id})")
        if weapon_type == cfg.HUNTING_HORN:
            # Encode the three notes as a string of color codes.
            note_entry = notes_data[binary.wep1_id]
            notes = [note_entry.note1, note_entry.note2, note_entry.note3]
            notes = [str(note_colors[n]) for n in notes]
            existing_entry['notes'] = "".join(notes)

    # Load weapon tree binary data
    weapon_trees = {}
    for weapon_type in cfg.weapon_types:
        weapon_tree = weapon_loader.load_tree(weapon_type)
        print(f"Loaded {weapon_type} weapon tree binary data")
        weapon_trees[weapon_type] = weapon_tree

    # Load Kulve Augment Data
    kulve_augments = weapon_loader.load_kulve_augments()
    artifacts.write_dicts_artifact("kulve_augments.csv", kulve_augments.flattened())

    # Write artifact lines
    print("Writing artifact files for weapons (use it to add new weapons)")
    write_weapon_artifacts(mhdata, weapon_trees, ammo_reader)

    # Store new weapon entries
    new_weapon_map = DataMap(languages=["en"],
                             start_id=mhdata.weapon_map.max_id+1,
                             keys_ex=["weapon_type"])

    # Iterate over existing weapons, merge new data in
    for existing_entry in mhdata.weapon_map.values():
        weapon_type = existing_entry['weapon_type']
        weapon_tree = weapon_trees[weapon_type]

        # Note: weapon data ordering is unknown. order field and tree_id asc are sometimes wrong
        # Therefore its unsorted, we have to work off the spreadsheet order
        multiplier = cfg.weapon_multiplier[weapon_type]

        weapon = weapon_tree.by_name(existing_entry.name('en'))
        if not weapon:
            # No binary match: keep the spreadsheet row unchanged.
            print(f"Could not find binary entry for {existing_entry.name('en')}")
            new_weapon_map.insert(existing_entry)
            continue

        is_kulve = existing_entry['category'] == 'Kulve'
        is_special = existing_entry['category'] in ('Kulve', 'Safi')

        binary = weapon.binary
        name = weapon.name
        new_entry = { **existing_entry }

        # Bind name and parent
        new_entry['name'] = name
        new_entry['weapon_type'] = weapon_type
        new_entry['previous_en'] = None
        if weapon.parent != None:
            new_entry['previous_en'] = weapon.parent.name['en']

        # Apply augmentation if its a kulve weapon that can get augmented
        if is_kulve:
            augment_params = kulve_augments.get(weapon_type, weapon.rarity)
            if augment_params:
                weapon = AugmentedWeapon(weapon, augment_params, 4)

        # Bind info
        new_entry['weapon_type'] = weapon_type
        new_entry['rarity'] = weapon.rarity
        # Display attack = raw attack * per-weapon-type multiplier, rounded.
        new_entry['attack'] = (weapon.attack * multiplier).quantize(
            Decimal('1.'), rounding=ROUND_HALF_UP)
        new_entry['affinity'] = weapon.affinity
        new_entry['defense'] = weapon.defense or None
        new_entry['slot_1'] = binary.gem_slot1_lvl
        new_entry['slot_2'] = binary.gem_slot2_lvl
        new_entry['slot_3'] = binary.gem_slot3_lvl
        new_entry['elderseal'] = elderseal[binary.elderseal]

        # Bind Elements
        if name['en'] in ["Twin Nails", "Fire and Ice", "Blizzard and Blaze"]:
            # Dual-element weapons: keep the hand-curated spreadsheet values.
            print(f"Skipping {name['en']} element data")
        else:
            hidden = binary.hidden_element_id != 0
            element_atk = weapon.element_value

            new_entry['element_hidden'] = hidden
            new_entry['element1'] = weapon.element_type
            new_entry['element1_attack'] = element_atk * 10 if element_atk else None
            new_entry['element2'] = None
            new_entry['element2_attack'] = None

        # Bind skill
        skill = skill_text_handler.get_skilltree_name(binary.skill_id)
        new_entry['skill'] = skill['en'] if binary.skill_id != 0 else None

        # Bind Extras (Blade/Gun/Bow data)
        if weapon_type in cfg.weapon_types_melee:
            bind_weapon_blade_ext(weapon_type, new_entry, weapon)
            new_entry['sharpness'] = sharpness_reader.sharpness_for(binary)
        elif weapon_type in cfg.weapon_types_gun:
            tree = weapon.tree
            if is_special:
                tree = existing_entry['category']
            (ammo_name, ammo_data) = ammo_reader.create_data_for(
                wtype=weapon_type, tree=tree, binary=weapon.binary)
            new_entry['ammo_config'] = ammo_name
        else:
            # TODO: Bows have an Enabled+ flag. Find out what it means
            # 1 = enabled, 2 = enabled+
            coating_binary = coating_data[binary.special_ammo_type]
            new_entry['bow'] = {
                'close': coating_binary.close_range > 0,
                'power': coating_binary.power > 0,
                'paralysis': coating_binary.paralysis > 0,
                'poison': coating_binary.poison > 0,
                'sleep': coating_binary.sleep > 0,
                'blast': coating_binary.blast > 0
            }

        # crafting data
        new_entry['craft'] = []
        if weapon.craft:
            new_entry['craft'].append({
                'type': 'Create',
                **convert_recipe(item_updater, weapon.craft)
            })
        if weapon.upgrade:
            new_entry['craft'].append({
                'type': 'Upgrade',
                **convert_recipe(item_updater, weapon.upgrade)
            })

        new_weapon_map.insert(new_entry)

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "weapons/weapon_base.csv",
        new_weapon_map,
        schema=schema.WeaponBaseSchema(),
        translation_filename="weapons/weapon_base_translations.csv"
    )
    writer.save_data_csv(
        "weapons/weapon_sharpness.csv",
        new_weapon_map,
        key="sharpness",
        schema=schema.WeaponSharpnessSchema()
    )
    writer.save_data_csv(
        "weapons/weapon_bow_ext.csv",
        new_weapon_map,
        key="bow",
        schema=schema.WeaponBowSchema()
    )
    writer.save_data_csv(
        "weapons/weapon_craft.csv",
        new_weapon_map,
        key="craft",
        schema=schema.WeaponRecipeSchema()
    )
    writer.save_keymap_csv(
        "weapons/weapon_ammo.csv",
        ammo_reader.data,
        schema=schema.WeaponAmmoSchema()
    )

    print("Weapon files updated\n")
def update_armor():
    "Populates and updates armor information using the armorset_base as a source of truth"
    armor_text = load_text("common/text/steam/armor")
    # NOTE(review): armorset_text is unused in this function; kept as-is.
    armorset_text = load_text("common/text/steam/armor_series")

    # Parses binary armor data, mapped by the english name
    armor_data = {}
    for armor_entry in load_schema(am_dat.AmDat, "common/equip/armor.am_dat").entries:
        # Skip rows without a gender or ordering (filtered-out binary rows).
        if armor_entry.gender == 0: continue
        if armor_entry.order == 0: continue
        name_en = armor_text[armor_entry.gmd_name_index]['en']
        armor_data[name_en] = armor_entry

    # Parses craft data, mapped by the binary armor id
    armor_craft_data = {}
    for craft_entry in load_schema(eq_crt.EqCrt, "common/equip/armor.eq_crt").entries:
        armor_craft_data[craft_entry.equip_id] = craft_entry

    # Get number of times armor can be upgraded by rarity level.
    # Unk7 is max level pre-augment, Unk8 is max post-augment
    # Thanks to the MHWorld Modders for the above info
    rarity_upgrades = {}
    for entry in load_schema(arm_up.ArmUp, "common/equip/arm_upgrade.arm_up").entries:
        rarity_upgrades[entry.index + 1] = (entry.unk7 - 1, entry.unk8 - 1)

    print("Binary data loaded")

    mhdata = load_data()
    print("Existing Data loaded. Using existing armorset data to drive new armor data.")

    # Will store results. Language lookup and validation will be in english
    new_armor_map = DataMap(languages="en")
    new_armorset_bonus_map = DataMap(languages="en")

    # Temporary storage for later processes
    all_set_skill_ids = OrderedSet()

    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()

    print("Populating armor data, keyed by the armorset data")
    next_armor_id = mhdata.armor_map.max_id + 1

    for armorset in mhdata.armorset_map.values():
        # Handle armor pieces
        for part, armor_name in datafn.iter_armorset_pieces(armorset):
            existing_armor = mhdata.armor_map.entry_of('en', armor_name)
            armor_binary = armor_data.get(armor_name)

            if not armor_binary:
                raise Exception(f"Failed to find binary armor data for {armor_name}")

            if armor_binary.set_skill1_lvl > 0:
                all_set_skill_ids.add(armor_binary.set_skill1)

            rarity = armor_binary.rarity + 1
            name_dict = armor_text[armor_binary.gmd_name_index]

            # Initial new armor data
            new_data = {
                'name': name_dict,  # Override for translation support!
                'rarity': rarity,
                'type': part,
                'gender': gender_list[armor_binary.gender],
                'slot_1': armor_binary.gem_slot1_lvl,
                'slot_2': armor_binary.gem_slot2_lvl,
                'slot_3': armor_binary.gem_slot3_lvl,
                'defense_base': armor_binary.defense,
                # Each upgrade level adds 2 defense on top of the base value.
                'defense_max': armor_binary.defense + rarity_upgrades[rarity][0] * 2,
                'defense_augment_max': armor_binary.defense + rarity_upgrades[rarity][1] * 2,
                'defense_fire': armor_binary.fire_res,
                'defense_water': armor_binary.water_res,
                'defense_thunder': armor_binary.thunder_res,
                'defense_ice': armor_binary.ice_res,
                'defense_dragon': armor_binary.dragon_res,
                'skills': {},
                'craft': {}
            }

            # Add skills to new armor data (up to two skill slots per piece)
            for i in range(1, 2 + 1):
                skill_lvl = getattr(armor_binary, f"skill{i}_lvl")
                if skill_lvl != 0:
                    skill_id = getattr(armor_binary, f"skill{i}")
                    name_en = skill_text_handler.get_skilltree_name(skill_id)['en']
                    new_data['skills'][f'skill{i}_name'] = name_en
                    new_data['skills'][f'skill{i}_pts'] = skill_lvl
                else:
                    new_data['skills'][f'skill{i}_name'] = None
                    new_data['skills'][f'skill{i}_pts'] = None

            # Add recipe to new armor data. Also track the encounter.
            recipe_binary = armor_craft_data[armor_binary.id]
            new_data['craft'] = convert_recipe(item_text_handler, recipe_binary)

            armor_entry = None
            if not existing_armor:
                print(f"Entry for {armor_name} not in armor map, creating new entry")
                armor_entry = new_armor_map.add_entry(next_armor_id, new_data)
                next_armor_id += 1
            else:
                armor_entry = new_armor_map.add_entry(existing_armor.id, {
                    **existing_armor,
                    **new_data
                })

    # Process set skills. As we don't currently understand the set -> skill map, we only translate
    # We pull the already established set skill name from existing CSV
    for bonus_entry in mhdata.armorset_bonus_map.values():
        skilltree = skill_text_handler.get_skilltree(bonus_entry.name('en'))
        name_dict = skill_text_handler.get_skilltree_name(skilltree.index)
        new_armorset_bonus_map.insert({**bonus_entry, 'name': name_dict})

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "armors/armor_base.csv",
        new_armor_map,
        schema=schema.ArmorBaseSchema(),
        translation_filename="armors/armor_base_translations.csv")
    writer.save_data_csv("armors/armor_skills_ext.csv", new_armor_map, key="skills")
    writer.save_data_csv("armors/armor_craft_ext.csv", new_armor_map, key="craft")
    writer.save_base_map_csv(
        "armors/armorset_bonus_base.csv",
        new_armorset_bonus_map,
        schema=schema.ArmorSetBonus(),
        translation_filename="armors/armorset_bonus_base_translations.csv")
    print("Armor files updated\n")

    # Append any items encountered in recipes that our item list lacks.
    add_missing_items(item_text_handler.encountered, mhdata=mhdata)
def update_monsters(mhdata, item_updater: ItemUpdater, monster_meta: MonsterMetadata):
    """Bind game text and drop data onto monster entries using the monster
    metadata, write per-monster drop artifacts and a hitzone JSON artifact,
    then write the monster base CSVs.
    """
    root = Path(get_chunk_root())

    # Mapping of the home folder of each monster by name
    # Currently these names are not the name_en entries, but the MonsterList entry names
    # NOTE(review): folder_for_monster is never populated or read here.
    folder_for_monster = {}

    # Load hitzone entries
    hitzone_json = []
    for filename in root.joinpath('em/').rglob('*.dtt_epg'):
        epg_binary = load_epg(filename)
        json_data = struct_to_json(epg_binary)

        try:
            name = monster_meta.by_id(epg_binary.monster_id).name
            hitzone_json.append({
                'name': name,
                'filename': str(filename.relative_to(root)),
                **json_data
            })
        except KeyError:
            pass  # warn?

    monster_name_text = load_text('common/text/em_names')
    monster_info_text = load_text('common/text/em_info')

    for monster_entry in mhdata.monster_map.values():
        name_en = monster_entry.name('en')
        if not monster_meta.has_monster(name_en):
            print(f'Warning: Monster {name_en} not in metadata, skipping')
            continue

        monster_key_entry = monster_meta.by_name(name_en)
        key_name = monster_key_entry.key_name
        key_description = monster_key_entry.key_description

        monster_entry['name'] = monster_name_text[key_name]
        if key_description:
            monster_entry['description'] = monster_info_text[f'NOTE_{key_description}_DESC']

        # Read drops (use the hunting notes key name if available)
        itlot_key = (key_description or key_name).lower()
        if itlot_key:
            itlot_path = root.joinpath(f"common/item/{itlot_key}.itlot")
            drops = load_itlot(itlot_path)

            # Flatten the itlot groups into rows; id 0 means an empty slot.
            monster_drops = []
            for idx, entry in enumerate(drops.entries):
                monster_drops.extend(
                    [{
                        'group': f'Group {idx+1}',
                        'item_name': item_updater.name_for(iid)['en'],
                        'quantity': qty,
                        'percentage': rarity
                    } for iid, qty, rarity, animation in entry.iter_items()
                     if iid != 0]
                )

            artifacts.write_dicts_artifact(f'monster_drops/{name_en} drops.csv', monster_drops)
        else:
            print(f'Warning: no drops file found for monster {name_en}')

    # Write hitzone data to artifacts
    artifacts.write_json_artifact("monster_hitzones.json", hitzone_json)
    print("Monster hitzones artifact written (Automerging not supported)")

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "monsters/monster_base.csv",
        mhdata.monster_map,
        schema=schema.MonsterBaseSchema(),
        translation_filename="monsters/monster_base_translations.csv",
        translation_extra=['description']
    )

    print("Monsters updated\n")
def add_missing_items(encountered_item_ids: Iterable[int], *, mhdata=None):
    """Expand the item CSV with in-game items that were encountered but not yet recorded.

    Three passes over the data:
      1. merge game binary item data with existing entries (new items are kept only
         if their id appears in ``encountered_item_ids``),
      2. carry over old entries absent from the binary-derived map,
      3. reorder everything by category/subcategory for the output CSV.

    :param encountered_item_ids: ids of items referenced elsewhere (recipes, drops, ...)
    :param mhdata: pre-loaded data bundle; loaded from disk when omitted
    """
    if not mhdata:
        mhdata = load_data()
        print("Existing Data loaded. Using to expand item list")

    # Game items sorted by their in-game display order
    item_data = sorted(
        load_schema(itm.Itm, "common/item/itemData.itm").entries,
        key=lambda i: i.order)
    item_text_manager = ItemTextHandler()

    new_item_map = DataMap(languages='en')

    # First pass. Iterate over existing ingame items and merge with existing data
    for entry in item_data:
        name_dict, description_dict = item_text_manager.text_for(entry.id)
        existing_item = mhdata.item_map.entry_of('en', name_dict['en'])
        is_encountered = entry.id in encountered_item_ids
        if not is_encountered and not existing_item:
            continue

        # note: we omit buy price as items may have a buy price even if not sold.
        # We only care about the buy price of BUYABLE items
        new_data = {
            'name': name_dict,
            'description': description_dict,
            'rarity': entry.rarity + 1,
            'sell_price': entry.sell_price if entry.sell_price != 0 else None
        }

        # Classification flags derived from the binary entry
        is_ez = (entry.flags & itm.ItmFlag.IsQuestOnly.value) != 0
        is_account = item_type_list[entry.type] == 'endemic'
        is_tradein = "(Trade-in Item)" in description_dict['en']
        is_appraisal = (entry.flags & itm.ItmFlag.IsAppraisal.value) != 0

        if name_dict['en'] == 'Normal Ammo 1':
            # Special-cased: inserted manually in the third pass ordering below
            new_data['category'] = 'hidden'
        elif is_ez:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'supply'
        elif is_account:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'account'
        elif is_appraisal:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'appraisal'
            new_data['sell_price'] = None  # why does this have values?
        else:
            new_data['category'] = item_type_list[entry.type]
            new_data['subcategory'] = 'trade' if is_tradein else None

        # Whether we show carry limit at all is based on item type.
        # Materials are basically infinite carry
        infinite_carry = new_data['category'] == 'material'
        new_data['carry_limit'] = None if infinite_carry else entry.carry_limit

        # Existing fields win only where new_data doesn't overwrite them
        if existing_item:
            new_item_map.insert({**existing_item, **new_data})
        else:
            new_item_map.insert(new_data)

    # Second pass, add old entries that are not in the new one
    for old_entry in mhdata.item_map.values():
        if old_entry.name('en') not in new_item_map.names('en'):
            new_item_map.insert(old_entry)

    # Third pass. Items need to be reordered based on type
    unsorted_item_map = new_item_map  # store reference to former map

    def filter_category(category, subcategory=None):
        "helper that returns items and then removes from unsorted item map"
        results = []
        for item in unsorted_item_map.values():
            if item['category'] == category and item['subcategory'] == subcategory:
                results.append(item)
        # delete after collecting — mutating the map while iterating would be unsafe
        for result in results:
            del unsorted_item_map[result.id]
        return results

    normal_ammo_1 = unsorted_item_map.entry_of("en", "Normal Ammo 1")

    # start the before-mentioned third pass by creating a new map based off the old one
    new_item_map = DataMap(languages="en")
    new_item_map.extend(filter_category('item'))
    new_item_map.extend(filter_category('material'))
    new_item_map.extend(filter_category('material', 'trade'))
    if normal_ammo_1:
        # Normal Ammo 1 is pinned here, just before the rest of the ammo
        new_item_map.insert(normal_ammo_1)
    new_item_map.extend(filter_category('ammo'))
    new_item_map.extend(filter_category('misc', 'appraisal'))
    new_item_map.extend(filter_category('misc', 'account'))
    new_item_map.extend(filter_category('misc', 'supply'))

    # Write out data
    writer = create_writer()
    writer.save_base_map_csv(
        "items/item_base.csv",
        new_item_map,
        schema=schema.ItemSchema(),
        translation_filename="items/item_base_translations.csv",
        translation_extra=['description'])

    print("Item files updated")
def update_weapons():
    """Update weapon CSV data from the game's binary weapon trees.

    Loads the existing editable data, overlays values decoded from the weapon
    binaries (base stats, elements, skills, sharpness, ammo configs, bow
    coatings, crafting recipes), writes the weapon CSV files, and finally
    registers any newly encountered crafting items via ``add_missing_items``.
    """
    mhdata = load_data()
    print("Existing Data loaded. Using to update weapon info")

    weapon_loader = WeaponDataLoader()
    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()
    notes_data = load_schema(wep_wsl.WepWsl, "common/equip/wep_whistle.wep_wsl")
    sharpness_reader = SharpnessDataReader()
    ammo_reader = WeaponAmmoLoader()
    coating_data = load_schema(bbtbl.Bbtbl, "common/equip/bottle_table.bbtbl")
    print("Loaded initial weapon binary data")

    def bind_weapon_blade_ext(weapon_type: str, existing_entry, binary: wp_dat.WpDatEntry):
        "Populate melee-only fields (phial/shelling/kinsect/notes); all are reset to None first."
        for key in [
                'kinsect_bonus', 'phial', 'phial_power', 'shelling',
                'shelling_level', 'notes'
        ]:
            existing_entry[key] = None
        if weapon_type == cfg.CHARGE_BLADE:
            existing_entry['phial'] = cb_phials[binary.wep1_id]
        if weapon_type == cfg.SWITCH_AXE:
            (phial, power) = s_axe_phials[binary.wep1_id]
            existing_entry['phial'] = phial
            existing_entry['phial_power'] = power
        if weapon_type == cfg.GUNLANCE:
            # first 5 are normals, second 5 are wide, third 5 are long
            shelling = ['normal', 'wide', 'long'][binary.wep1_id // 5]
            level = (binary.wep1_id % 5) + 1
            existing_entry['shelling'] = shelling
            existing_entry['shelling_level'] = level
        if weapon_type == cfg.INSECT_GLAIVE:
            existing_entry['kinsect_bonus'] = glaive_boosts[binary.wep1_id]
        if weapon_type == cfg.HUNTING_HORN:
            note_entry = notes_data[binary.wep1_id]
            notes = [note_entry.note1, note_entry.note2, note_entry.note3]
            notes = [str(note_colors[n]) for n in notes]
            existing_entry['notes'] = "".join(notes)

    # Store new weapon entries
    new_weapon_map = DataMap(languages="en", start_id=mhdata.weapon_map.max_id + 1)

    # Iterate over weapon types
    for weapon_type in cfg.weapon_types:
        print(f"Processing {weapon_type}")

        # Note: weapon data ordering is unknown. order field and tree_id asc are sometimes wrong
        # Therefore its unsorted, we have to work off the spreadsheet order
        weapon_tree = weapon_loader.load_tree(weapon_type)
        print(f"Loaded {weapon_type} weapon tree binary data")

        multiplier = cfg.weapon_multiplier[weapon_type]

        # Iterate over nodes in the weapon tree (does depth first search)
        for weapon_node in weapon_tree:
            binary = weapon_node.binary
            name = weapon_node.name
            existing_entry = mhdata.weapon_map.entry_of('en', name['en'])

            # Start from the existing entry (if any) so manually-curated fields survive
            new_entry = {}
            if existing_entry:
                new_entry = {**existing_entry}

            # Bind name and parent
            new_entry['name'] = name
            new_entry['weapon_type'] = weapon_type
            new_entry['previous_en'] = None
            if weapon_node.parent is not None:
                new_entry['previous_en'] = weapon_node.parent.name['en']

            # Bind info
            new_entry['rarity'] = binary.rarity + 1
            new_entry['attack'] = binary.raw_damage * multiplier
            new_entry['affinity'] = binary.affinity
            new_entry['defense'] = binary.defense or None
            new_entry['slot_1'] = binary.gem_slot1_lvl
            new_entry['slot_2'] = binary.gem_slot2_lvl
            new_entry['slot_3'] = binary.gem_slot3_lvl
            new_entry['elderseal'] = elderseal[binary.elderseal]

            # Bind Elements
            if name['en'] in ["Twin Nails", "Fire and Ice"]:
                # Dual-element weapons can't be decoded here; keep existing manual data
                print(f"Skipping {name['en']} element data")
            else:
                # A nonzero hidden element supersedes the visible one
                hidden = binary.hidden_element_id != 0
                element_id = binary.hidden_element_id if hidden else binary.element_id
                element_atk = binary.hidden_element_damage if hidden else binary.element_damage

                new_entry['element_hidden'] = hidden
                new_entry['element1'] = elements[element_id]
                new_entry['element1_attack'] = element_atk * 10 if element_atk else None
                new_entry['element2'] = None
                new_entry['element2_attack'] = None

            # Bind skill
            skill = skill_text_handler.get_skilltree_name(binary.skill_id)
            new_entry['skill'] = skill['en'] if binary.skill_id != 0 else None

            # Bind Extras (Blade/Gun/Bow data)
            if weapon_type in cfg.weapon_types_melee:
                bind_weapon_blade_ext(weapon_type, new_entry, binary)
                new_entry['sharpness'] = sharpness_reader.sharpness_for(binary)
            elif weapon_type in cfg.weapon_types_gun:
                (ammo_name, _) = ammo_reader.create_data_for(
                    wtype=weapon_type,
                    tree=weapon_node.tree,
                    binary=weapon_node.binary)
                new_entry['ammo_config'] = ammo_name
            else:
                # Bows: decode usable coatings from the bottle table.
                # TODO: Bows have an Enabled+ flag. Find out what it means
                # 1 = enabled, 2 = enabled+
                coating_binary = coating_data[binary.special_ammo_type]
                new_entry['bow'] = {
                    'close': coating_binary.close_range > 0,
                    'power': coating_binary.power > 0,
                    'paralysis': coating_binary.paralysis > 0,
                    'poison': coating_binary.poison > 0,
                    'sleep': coating_binary.sleep > 0,
                    'blast': coating_binary.blast > 0
                }

            # crafting data
            new_entry['craft'] = []
            if weapon_node.craft:
                new_entry['craft'].append({
                    'type': 'Create',
                    **convert_recipe(item_text_handler, weapon_node.craft)
                })
            if weapon_node.upgrade:
                new_entry['craft'].append({
                    'type': 'Upgrade',
                    **convert_recipe(item_text_handler, weapon_node.upgrade)
                })

            new_weapon_map.insert(new_entry)

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "weapons/weapon_base.csv",
        new_weapon_map,
        schema=schema.WeaponBaseSchema(),
        translation_filename="weapons/weapon_base_translations.csv")
    writer.save_data_csv(
        "weapons/weapon_sharpness.csv",
        new_weapon_map,
        key="sharpness",
        schema=schema.WeaponSharpnessSchema())
    writer.save_data_csv(
        "weapons/weapon_bow_ext.csv",
        new_weapon_map,
        key="bow",
        schema=schema.WeaponBowSchema())
    writer.save_data_csv(
        "weapons/weapon_craft.csv",
        new_weapon_map,
        key="craft",
        schema=schema.WeaponCraftSchema())
    writer.save_keymap_csv(
        "weapons/weapon_ammo.csv",
        ammo_reader.data,
        schema=schema.WeaponAmmoSchema())

    print("Weapon files updated\n")

    add_missing_items(item_text_handler.encountered, mhdata=mhdata)
def update_weapon_songs(mhdata):
    """Merge hunting horn melody binary and text data into the weapon melody map.

    The song format is only partially understood: the text file location and the
    id -> notes mapping are known; everything else is matched manually via the
    English melody name.
    """
    print("Beginning load of hunting horn melodies")
    print("Warning: Hunting Horn format is unknown, but we do have a few pieces...")
    print("We know where the text data, and we know the id -> notes file formats")
    print("Everything else has to be manually connected.")

    melody_binaries = load_schema(msk.Msk, 'hm/wp/wp05/music_skill.msk')
    melody_text = load_text("common/text/vfont/music_skill")

    # english melody name -> full multi-language name dict
    names_by_en = {entry['en']: entry for entry in melody_text.values()}

    print("Writing artifact files for melody english text entries")
    artifacts.write_names_artifact("melody_strings_en.txt", names_by_en.keys())

    # add an N/A sentinel so unmatched fields can still resolve to a name dict
    names_by_en['N/A'] = {lang: 'N/A' for lang in cfg.all_languages}

    # Build melody id -> every note sequence that can trigger that melody,
    # plus one "id,notes" artifact line per binary record
    note_lines = []
    notes_by_id = {}
    for record in melody_binaries:
        sequence = ''
        for slot in range(1, 5):
            color_idx = getattr(record, f'note{slot}')
            # an out-of-range index (int max) marks an unused note slot
            if color_idx < len(note_colors):
                sequence += note_colors[color_idx]

        note_lines.append(f'{record.id},{sequence}')
        notes_by_id.setdefault(record.id, []).append(sequence)

    print("Writing artifact files for id -> notes")
    artifacts.write_lines_artifact("melody_notes.txt", note_lines)

    print("Merging text values and notes into weapon melodies")
    for melody in mhdata.weapon_melodies.values():
        for field in ('name', 'effect1', 'effect2'):
            english = melody[field]['en']
            if english in names_by_en:
                melody[field] = names_by_en[english]
            else:
                print(f"Could not find GMD text entry for {english}")

        if melody.id in notes_by_id:
            melody['notes'] = [{'notes': sequence} for sequence in notes_by_id[melody.id]]
        else:
            print("Could not find binary music notes entries for " + melody['name']['en'])

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "weapons/weapon_melody_base.csv",
        mhdata.weapon_melodies,
        schema=schema.WeaponMelodyBaseSchema()
    )
    writer.save_data_csv(
        "weapons/weapon_melody_notes.csv",
        mhdata.weapon_melodies,
        key='notes'
    )

    print("Weapon Melody files updated\n")
def update_armor(mhdata, item_updater: ItemUpdater):
    "Populates and updates armor information using the armorset_base as a source of truth"
    armor_series = load_armor_series()

    # Get number of times armor can be upgraded by rarity level.
    # Unk7 is max level pre-augment, Unk8 is max post-augment
    # Thanks to the MHWorld Modders for the above info
    rarity_upgrades = {}
    for entry in load_schema(arm_up.ArmUp, "common/equip/arm_upgrade.arm_up").entries:
        rarity_upgrades[entry.index + 1] = (entry.unk7 - 1, entry.unk8 - 1)

    print("Binary armor data loaded")

    print("Writing list of armorset names (in order) to artifacts")
    artifacts.write_names_artifact(
        'setnames.txt', [s.name['en'] for s in armor_series.values()])

    # Will store results. Language lookup and validation will be in english
    new_armorset_map = DataMap(languages="en",
                               start_id=mhdata.armorset_map.max_id + 1)
    new_armor_map = DataMap(languages="en",
                            start_id=mhdata.armor_map.max_id + 1)
    new_armorset_bonus_map = DataMap(languages="en")

    # Temporary storage for later processes
    all_set_skill_ids = OrderedSet()
    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()

    # english armor piece name -> its binary armor data; filled during the set
    # pass below and consumed by the per-piece pass that follows
    armor_data_by_name = {}

    print("Updating set data, keyed by the existing names in armorset_base.csv")
    for armorset_entry in mhdata.armorset_map.values():
        armorseries_data = armor_series.get(armorset_entry.name('en'))
        if not armorseries_data:
            print(
                f"Armor series {armorset_entry.name('en')} doesn't exist in binary, skipping"
            )
            new_armorset_map.insert(armorset_entry)
            continue

        new_entry = {
            **armorset_entry,
            'name': armorseries_data.name,
            'rank': armorseries_data.rank
        }

        # Set bonus skill is read from the first armor piece of the series
        first_armor = armorseries_data.armors[0].binary
        if first_armor.set_skill1_lvl > 0:
            skill_id = first_armor.set_skill1
            all_set_skill_ids.add(skill_id)
            new_entry['bonus'] = skill_text_handler.get_skilltree_name(skill_id)['en']

        for part in cfg.armor_parts:
            armor = armorseries_data.get_part(part)
            if armor:
                armor_data_by_name[armor.name['en']] = armor
                new_entry[part] = armor.name['en']
            else:
                new_entry[part] = None

        new_armorset_map.insert(new_entry)

    print("Armorset entries updated")

    print("Updating armor")
    for armorset_entry in new_armorset_map.values():
        # Handle armor pieces
        for part, armor_name in datafn.iter_armorset_pieces(armorset_entry):
            existing_armor = mhdata.armor_map.entry_of('en', armor_name)
            armor_data = armor_data_by_name.get(armor_name, None)

            if not armor_data:
                print(
                    f"Failed to find binary armor data for {armor_name}, maintaining existing data"
                )
                new_armor_map.insert(existing_armor)
                continue

            armor_binary = armor_data.binary
            rarity = armor_binary.rarity + 1

            # Initial new armor data
            # defense_max/augment_max: each upgrade level adds 2 defense
            new_data = {
                'name': armor_data.name,
                'rarity': rarity,
                'type': part,
                'gender': gender_list[armor_binary.gender],
                'slot_1': armor_binary.gem_slot1_lvl,
                'slot_2': armor_binary.gem_slot2_lvl,
                'slot_3': armor_binary.gem_slot3_lvl,
                'defense_base': armor_binary.defense,
                'defense_max': armor_binary.defense + rarity_upgrades[rarity][0] * 2,
                'defense_augment_max': armor_binary.defense + rarity_upgrades[rarity][1] * 2,
                'defense_fire': armor_binary.fire_res,
                'defense_water': armor_binary.water_res,
                'defense_thunder': armor_binary.thunder_res,
                'defense_ice': armor_binary.ice_res,
                'defense_dragon': armor_binary.dragon_res,
                'skills': {},
                'craft': {}
            }
            if existing_armor:
                new_data['id'] = existing_armor.id

            # Add skills to new armor data
            # NOTE(review): only skill slots 1-2 are read here — confirm armor
            # binaries never carry a third skill
            for i in range(1, 2 + 1):
                skill_lvl = getattr(armor_binary, f"skill{i}_lvl")
                if skill_lvl != 0:
                    skill_id = getattr(armor_binary, f"skill{i}")
                    name_en = skill_text_handler.get_skilltree_name(skill_id)['en']
                    new_data['skills'][f'skill{i}_name'] = name_en
                    new_data['skills'][f'skill{i}_pts'] = skill_lvl
                else:
                    new_data['skills'][f'skill{i}_name'] = None
                    new_data['skills'][f'skill{i}_pts'] = None

            # Add recipe to new armor data. Also track the encounter.
            recipe_binary = armor_data.recipe
            new_data['craft'] = convert_recipe(item_text_handler, recipe_binary)

            # Add new data to new armor map
            new_armor_map.insert(new_data)

    # Process set skills. As we don't currently understand the set -> skill map, we only translate
    # We pull the already established set skill name from existing CSV
    for bonus_entry in mhdata.armorset_bonus_map.values():
        skilltree = skill_text_handler.get_skilltree(bonus_entry.name('en'))
        name_dict = skill_text_handler.get_skilltree_name(skilltree.index)
        new_armorset_bonus_map.insert({**bonus_entry, 'name': name_dict})

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        "armors/armorset_base.csv",
        new_armorset_map,
        schema=schema.ArmorSetSchema(),
        translation_filename="armors/armorset_base_translations.csv")
    writer.save_base_map_csv(
        "armors/armor_base.csv",
        new_armor_map,
        schema=schema.ArmorBaseSchema(),
        translation_filename="armors/armor_base_translations.csv")
    writer.save_data_csv("armors/armor_skills_ext.csv", new_armor_map, key="skills")
    writer.save_data_csv("armors/armor_craft_ext.csv", new_armor_map, key="craft")
    writer.save_base_map_csv(
        "armors/armorset_bonus_base.csv",
        new_armorset_bonus_map,
        schema=schema.ArmorSetBonus(),
        translation_filename="armors/armorset_bonus_base_translations.csv")

    print("Armor files updated\n")

    # Register any crafting items first seen in armor recipes
    item_updater.add_missing_items(item_text_handler.encountered)
def update_monsters(mhdata, item_updater: ItemUpdater, monster_meta: MonsterMetadata):
    """Update monster data from binaries: hitzones, statuses, drops, names, descriptions.

    Decodes every ``.dtt_epg`` under ``em/`` into hitzone/part/break tables,
    writes raw artifacts, then validates the editable monster map (drop tables
    and hitzones) against the binary data and overlays localized text and trap
    susceptibility before saving the monster base CSV.

    NOTE(review): this definition shadows the earlier ``update_monsters`` in
    this file (same name and signature) — confirm the earlier one is obsolete.
    """
    root = Path(get_chunk_root())

    # Mapping of the home folder of each monster by name
    # Currently these names are not the name_en entries, but the MonsterList entry names
    # NOTE(review): never populated or read here — dead code?
    folder_for_monster = {}

    # Load hitzone entries. EPG files contain hitzones, parts, and base hp
    print('Loading monster hitzone data')
    monster_hitzones = {}
    hitzone_raw_data = []
    hitzone_raw_data_flat = []
    for filename in root.joinpath('em/').rglob('*.dtt_epg'):
        epg_binary = load_epg(filename)

        try:
            meta = monster_meta.by_id(epg_binary.monster_id)
            name = meta.name
        except KeyError:
            continue  # warn?

        # e.g. "<file stem>_<grandparent folder>", used to look up part names
        path_key = filename.stem + "_" + str(filename.parents[1].stem)

        hitzone_raw_data.append({
            'name': name,
            'filename': str(filename.relative_to(root)),
            **struct_to_json(epg_binary)
        })

        monster_hitzones[name] = []
        for hitzone_id, hitzone in enumerate(epg_binary.hitzones):
            monster_hitzones[name].append({
                'hitzone_id': hitzone_id,
                'cut': hitzone.Sever,
                'impact': hitzone.Blunt,
                'shot': hitzone.Shot,
                'fire': hitzone.Fire,
                'water': hitzone.Water,
                'thunder': hitzone.Thunder,
                'ice': hitzone.Ice,
                'dragon': hitzone.Dragon,
                'ko': hitzone.Stun
            })

        # Hitzone indices never referenced by a subpart; emitted as 'unlinked' rows below
        unlinked = set(range(len(monster_hitzones[name])))

        def get_hitzone(idx):
            # Returns the hitzone dict for idx (or None for -1) and marks it linked
            if idx == -1:
                return None
            hitzone = monster_hitzones[name][idx]
            if idx in unlinked:
                unlinked.remove(idx)
            return hitzone

        for part_id, part in enumerate(epg_binary.parts):
            # Last non-'any' cleave entry wins for this part's sever data
            sever_type = None
            sever_value = None
            for cleave_idx in part.iter_cleaves():
                if cleave_idx == -1:
                    continue
                cleave = epg_binary.cleaves[cleave_idx]
                if cleave.damage_type == 'any':
                    continue
                sever_type = cleave.damage_type
                sever_value = cleave.special_hp

            for subpart in part.subparts:
                base_params = {
                    'name_en': name,
                    'part_id': part_id,
                    'part_name': monster_meta.get_part(path_key, part_id),
                    'flinch': part.flinchValue,
                    'sever_type': sever_type,
                    'sever': sever_value,
                    'extract': part.extract,
                }

                base_hzv = get_hitzone(subpart.hzv_base)
                if base_hzv:
                    hitzone_raw_data_flat.append({
                        **base_params,
                        'type': 'base',
                        **base_hzv
                    })

                broken_hzv = get_hitzone(subpart.hzv_broken)
                if broken_hzv:
                    hitzone_raw_data_flat.append({
                        **base_params,
                        'type': 'broken',
                        **broken_hzv
                    })

                # Behemoth's special hitzones are excluded here — presumably
                # its special slots are bogus; confirm before changing
                if name not in ['Behemoth']:
                    for special_idx in range(3):
                        value = getattr(subpart, 'hzv_special' + str(special_idx+1))
                        hzv_spec = get_hitzone(value)
                        if hzv_spec:
                            hitzone_raw_data_flat.append({
                                **base_params,
                                'type': 'special ' + str(special_idx + 1),
                                **hzv_spec
                            })

        for idx in unlinked:
            hitzone_raw_data_flat.append({
                'name_en': name,
                'part_id': 'unlinked',
                'part_name': 'unlinked',
                'type': 'unlinked',
                **monster_hitzones[name][idx]
            })

    print('Loaded Monster hitzone data')

    # Load status entries
    monster_statuses = read_status(monster_meta)
    print('Loaded Monster status data')

    # Write hitzone data to artifacts
    artifacts.write_json_artifact("monster_hitzones_and_breaks.json", hitzone_raw_data)
    print("Monster hitzones+breaks raw data artifact written (Automerging not supported)")
    artifacts.write_dicts_artifact('monster_hitzones_raw.csv', hitzone_raw_data_flat)
    print("Monster hitzones artifact written (Automerging not supported)")
    artifacts.write_json_artifact("monster_status.json", list(monster_statuses.values()))
    print("Monster status artifact written (Automerging not supported)")

    monster_name_text = load_text('common/text/em_names')
    monster_info_text = load_text('common/text/em_info')

    monster_drops = read_drops(monster_meta, item_updater)
    print('Loaded Monster drop rates')

    for monster_entry in mhdata.monster_map.values():
        name_en = monster_entry.name('en')
        if not monster_meta.has_monster(name_en):
            print(f'Warning: Monster {name_en} not in metadata, skipping')
            continue

        monster_key_entry = monster_meta.by_name(name_en)
        key_name = monster_key_entry.key_name
        key_description = monster_key_entry.key_description

        monster_entry['name'] = monster_name_text[key_name]
        if key_description:
            monster_entry['description'] = monster_info_text[f'NOTE_{key_description}_DESC']

        # Compare drops (use the hunting notes key name if available)
        drop_tables = monster_drops.get(monster_key_entry.id, None)
        if drop_tables:
            # Write drops to artifact files
            joined_drops = []
            for idx, drop_table in enumerate(drop_tables):
                joined_drops.extend({'group': f'Group {idx+1}', **e} for e in drop_table)
            artifacts.write_dicts_artifact(f'monster_drops/{name_en} drops.csv', joined_drops)

            # Check if any drop table in our current database is invalid
            if 'rewards' in monster_entry:
                # groupby requires sorting by the same key first
                rewards_sorted = sorted(monster_entry['rewards'], key=itemgetter('condition_en'))
                rewards = itertools.groupby(rewards_sorted, key=lambda r: (r['condition_en'], r['rank']))
                for (condition, rank), existing_table in rewards:
                    if condition in itlot_conditions:
                        existing_table = list(existing_table)
                        if not any(compare_drop_tables(existing_table, table) for table in drop_tables):
                            print(f"Validation Error: Monster {name_en} has invalid drop table {condition} in {rank}")
        else:
            print(f'Warning: no drops file found for monster {name_en}')

        # Compare hitzones
        # NOTE(review): monster_hitzones is keyed by the metadata name, which
        # (per the comment near folder_for_monster) may differ from name_en —
        # confirm these keys actually line up
        hitzone_data = monster_hitzones.get(name_en, None)
        if hitzone_data and 'hitzones' in monster_entry:
            # Create tuples of the values of the hitzone, to use as a comparator
            hitzone_key = lambda h: tuple(h[v] for v in hitzone_fields)
            stored_hitzones = [hitzone_key(h) for h in hitzone_data]
            stored_hitzones_set = set(stored_hitzones)

            # Check if any hitzone we have doesn't actually exist
            for hitzone in monster_entry['hitzones']:
                if hitzone_key(hitzone) not in stored_hitzones_set:
                    print(f"Validation Error: Monster {name_en} has invalid hitzone {hitzone['hitzone']['en']}")
        elif 'hitzones' not in monster_entry and hitzone_data:
            print(f'Warning: no hitzones in monster entry {name_en}, but binary data exists')
        else:
            print(f"Warning: No hitzone data for monster {name_en}")

        # Status info
        status = monster_statuses.get(monster_key_entry.id, None)
        if status:
            # A trap works only if it both builds up and decays
            test = lambda v: v['base'] > 0 and v['decrease'] > 0
            monster_entry['pitfall_trap'] = True if test(status['pitfall_trap_buildup']) else False
            monster_entry['shock_trap'] = True if test(status['shock_trap_buildup']) else False
            monster_entry['vine_trap'] = True if test(status['vine_trap_buildup']) else False

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "monsters/monster_base.csv",
        mhdata.monster_map,
        schema=schema.MonsterBaseSchema(),
        translation_filename="monsters/monster_base_translations.csv",
        translation_extra=['description']
    )

    print("Monsters updated\n")
def update_charms(mhdata, item_updater: ItemUpdater, armor_collection: ArmorCollection):
    "Populates and updates charm information using the charm_base as a source of truth"
    print("Writing list of charm names (in order) to artifacts")

    def charm_summary(charm):
        # flat artifact row: english name plus english parent name (or a falsy parent)
        return {
            'name_en': charm.name['en'],
            'parent': charm.parent and charm.parent.name['en'],
        }

    artifacts.write_dicts_artifact(
        'charms_raw.csv',
        [charm_summary(charm) for charm in armor_collection.charms])

    skill_text_handler = SkillTextHandler()
    binaries_by_name = {charm.name['en']: charm for charm in armor_collection.charms}

    updated_map = DataMap(languages=["en"])
    for existing in mhdata.charm_map.values():
        merged = {**existing}

        binary = binaries_by_name.get(existing['name_en'])
        if not binary:
            # no binary counterpart: carry the entry through unchanged
            print(
                f"Warning: Charm {existing['name_en']} has no associated binary data"
            )
            updated_map.insert(merged)
            continue

        merged['name'] = binary.name
        merged['previous_en'] = binary.parent and binary.parent.name['en']
        merged['rarity'] = binary.rarity

        # Pad to exactly two skill slots, then bind each slot's name and level
        padded_skills = binary.skills + ([(None, None)] * (2 - len(binary.skills)))
        for slot, (skill_id, skill_lvl) in enumerate(padded_skills, start=1):
            if skill_id is None:
                merged[f'skill{slot}_name'] = None
                merged[f'skill{slot}_level'] = None
            else:
                merged[f'skill{slot}_name'] = skill_text_handler.get_skilltree_name(skill_id)['en']
                merged[f'skill{slot}_level'] = skill_lvl

        # Collect whichever recipes (create / upgrade) exist for this charm
        merged['craft'] = [
            {'type': recipe_type, **convert_recipe(item_updater, recipe)}
            for recipe_type, recipe in (('Create', binary.craft), ('Upgrade', binary.upgrade))
            if recipe
        ]

        updated_map.insert(merged)

    # Write new data
    writer = create_writer()
    writer.save_base_map_csv(
        'charms/charm_base.csv',
        updated_map,
        translation_filename="charms/charm_base_translations.csv",
        schema=schema.CharmBaseSchema())
    writer.save_data_csv("charms/charm_craft.csv", updated_map, key="craft")

    print("Charm files updated\n")