Example #1
def update_tools(mhdata):
    tool_data = ToolCollection()

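    # Continue ids after the highest existing tool entry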
    new_tools = DataMap(start_id=mhdata.tool_map.max_id + 1)
    for tool in tool_data.tools:
        name_en = tool.name_upgraded['en']
        existing_entry = mhdata.tool_map.entry_of('en', name_en)

        new_entry = {}
        if existing_entry:
            new_entry = {**existing_entry}

        new_entry['name'] = tool.name_upgraded
        new_entry['name_base'] = tool.name
        new_entry['description'] = tool.description
        new_entry['tool_type'] = (
            'booster' if 'booster' in tool.name['en'].lower() else 'mantle')
        new_entry.setdefault('duration', 0)
        new_entry.setdefault('duration_upgraded', None)
        new_entry.setdefault('recharge', 0)
        new_entry['slot_1'] = tool.slots[0]
        new_entry['slot_2'] = tool.slots[1]
        new_entry['slot_3'] = tool.slots[2]
        new_entry.setdefault('icon_color', None)

        new_tools.insert(new_entry)

    writer = create_writer()

    writer.save_base_map_csv(
        "tools/tool_base.csv",
        new_tools,
        schema=schema.ToolSchema(),
        translation_filename="tools/tool_base_translations.csv",
        translation_extra=['name_base', 'description'])
Example #2
def update_kinsects(mhdata, item_updater: ItemUpdater):
    print('Loading kinsect info')
    kinsect_tree = load_kinsect_tree()

    def resolve_parent_name(entry):
        if entry.parent:
            return entry.parent.name['en']
        return ''

    items = [
        f"{r.id},{r.name['en']},{resolve_parent_name(r)}"
        for r in kinsect_tree.crafted()
    ]
    artifacts.write_artifact('kinsect_all.txt', *items)
    items = [f"{r.id},{r.name['en']}" for r in kinsect_tree.roots]
    artifacts.write_artifact('kinsect_roots.txt', *items)

    kinsect_map = DataMap(languages=['en'])
    for kinsect_node in kinsect_tree.crafted():
        binary = kinsect_node.binary
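        # Binary ids and rarity are 0-indexed; shift both to 1-indexed for the output data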
        new_entry = kinsect_map.insert({
            'id': binary.id + 1,
            'name': kinsect_node.name,
            'previous_en': resolve_parent_name(kinsect_node),
            'rarity': binary.rarity + 1,
            'attack_type': kinsect_attack_types[binary.attack_type],
            'dust_effect': kinsect_dusts[binary.dust_type],
            'power': binary.power,
            'speed': binary.speed,
            'heal': binary.heal
        })

        if kinsect_node.upgrade:
            new_entry['craft'] = convert_recipe(item_updater,
                                                kinsect_node.upgrade)

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "weapons/kinsect_base.csv",
        kinsect_map,
        schema=schema.KinsectBaseSchema(),
        translation_filename="weapons/kinsect_base_translations.csv")

    writer.save_data_csv("weapons/kinsect_craft_ext.csv",
                         kinsect_map,
                         key="craft",
                         schema=schema.RecipeSchema())

    print("Kinsect files updated\n")
Example #3
def test_manual_id_resets_sequence():
    datamap = DataMap()

    datamap.add_entry(25, create_test_entry_en('test1'))
    new_entry = datamap.insert(create_test_entry_en('test2'))

    assert new_entry.id > 25, "new id should have been higher"
Example #4
def test_merges_names():
    datamap = DataMap({1: create_test_entry({'en': 'NAME EN'})})

    datamap.merge({'NAME EN': {'name': {'es': 'NAME ES'}}})

    assert datamap[1]['name']['en'] == 'NAME EN', 'kept old name'
    assert 'es' in datamap[1]['name'], 'Added new spanish name'
    assert datamap[1]['name']['es'] == 'NAME ES', 'spanish name is correct'
Example #5
def test_save_base_symmetric(writer):
    data = DataMap()
    data.add_entry(1, create_entry_en('test1'))
    data.add_entry(2, create_entry_en('test2'))

    writer.save_base_map('testbase.json', data)
    new_data = writer.load_base_json('testbase.json', languages)

    assert dict(data) == dict(new_data), "saved data didn't match"
Example #6
def test_can_iterate_values_in_order():
    expected_names = ['test1', 'test2', 'test3']

    map = DataMap()
    for (id, name) in enumerate(expected_names):
        map.add_entry(id, create_test_entry_en(name))

    found = [entry['name']['en'] for entry in map.values()]
    assert found == expected_names, "Expected map entries to match"
Example #7
def test_save_base_csv_symmetric(writer: DataReaderWriter):
    data = DataMap()
    data.insert(create_entry_en('test1'))
    data.insert(create_entry_en('test2'))

    groups = ['name', 'description']
    writer.save_base_map_csv('testbase.csv', data, groups=groups)
    new_data = writer.load_base_csv('testbase.csv', groups=groups)

    assert data.to_list() == new_data.to_list(), "saved data didn't match"
Example #8
def test_clone_returns_equal_map():
    data = {
        25: create_test_entry_en('test1', { 'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', { 'somedata': {'alsonested': 'hey'}})
    }

    datamap = DataMap(data)
    cloned_datamap = datamap.copy()

    assert datamap.to_dict() == cloned_datamap.to_dict(), "expected clone to match"
    assert id(datamap) != id(cloned_datamap), "expecting clone to be a different object"
Example #9
def test_save_base_csv_symmetric(writer: DataReaderWriter):
    # Note: CSVs do not save typing info, so everything is strings
    data = DataMap()
    data.insert(create_entry_en('test1', {'id': '1'}))
    data.insert(create_entry_en('test2', {'id': '2'}))

    groups = ['name', 'description']
    writer.save_base_map_csv('testbase.csv', data, groups=groups)
    new_data = writer.load_base_csv('testbase.csv', languages, groups=groups)

    assert data.to_list() == new_data.to_list(), "saved data didn't match"
Example #10
def test_can_iterate_values_in_order():
    expected_entries = [
        (1, create_test_entry_en('test1')),
        (2, create_test_entry_en("test2")),
        (3, create_test_entry_en("test3"))]
    
    map = DataMap()
    for (id, entry) in expected_entries:
        map.add_entry(id, entry)
    
    found = [(id, entry) for (id, entry) in map.items()]
    assert found == expected_entries, "Expected map entries to match"
Example #11
def test_to_dict_correct_data():
    data = {
        25: create_test_entry_en('test1', { 'id': 25, 'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', { 'id': 28, 'somedata': {'alsonested': 'hey'}})
    }

    datamap = DataMap()
    for row in data.values():
        datamap.insert(row)

    serialized = datamap.to_dict()
    assert serialized == data, "expected serialized data to match original data"
Example #12
def test_to_dict_correct_data():
    data = {
        25: create_test_entry_en('test1', { 'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', { 'somedata': {'alsonested': 'hey'}})
    }

    datamap = DataMap()
    datamap.add_entry(25, data[25])
    datamap.add_entry(28, data[28])

    serialized = datamap.to_dict()
    assert serialized == data, "expected serialized data to match original data"
Example #13
def transform_dmap(dmap: DataMap, obj_schema):
    """Returns a new datamap, 
    where the items in the original have run through the marshmallow schema."""
    results = DataMap()
    for entry_id, entry in dmap.items():
        data = entry.to_dict()
        (converted, errors) = obj_schema.load(data, many=False)

        if errors:
            raise Exception(str(errors))

        results.add_entry(entry_id, converted)
    return results
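
A minimal usage sketch (not from the source): it assumes marshmallow 2.x, where Schema.load returns a (data, errors) pair as unpacked above, plus this project's DataMap; ExampleSchema is hypothetical.

from marshmallow import Schema, fields

class ExampleSchema(Schema):
    # hypothetical schema, for illustration only
    id = fields.Int()
    name = fields.Dict()

source = DataMap()
source.add_entry(1, {'id': 1, 'name': {'en': 'test'}})
converted = transform_dmap(source, ExampleSchema())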
Example #14
def test_row_add_value_in_middle():
    test_keys = ['id', 'test1', 'test2', 'test3']
    test_dict = {k: 1 for k in test_keys}
    test_dict['name'] = {'en': 'a test'}  # required field

    datamap = DataMap()
    entry = datamap.insert(test_dict)

    entry.set_value('NEW', 1, after='test2')

    # note: name exists because it was manually added to test_dict
    expected_keys = ['id', 'test1', 'test2', 'NEW', 'test3', 'name']
    entry_keys = list(entry.keys())
    assert entry_keys == expected_keys, "Expected new to be after test2"
Example #15
def test_save_data_json_symmetric(writer):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': 'test1'})
    extdata.add_entry(2, {**basedata[2], 'data': 'test2'})

    writer.save_data_json('testdatasym.json', extdata, fields=['data'])

    testdata = writer.load_data_json(basedata.copy(), 'testdatasym.json')

    assert extdata.to_dict() == testdata.to_dict(), "expected data to match"
Example #16
def test_merge_adds_data():
    baseData = {
        1: create_test_entry_en('test1'),
        2: create_test_entry_en('test2'),
        3: create_test_entry_en('test3')
    }
    datamap = DataMap(baseData.copy())

    extendedData = {'test1': {'extended': 2}, 'test3': {'extended': 3}}

    datamap.merge(extendedData)

    assert datamap[1]['extended'] == 2, 'expected data 1 to get extended'
    assert datamap[3]['extended'] == 3, 'expected data 3 to get extended'
    assert 'extended' not in datamap[2], 'expected data 2 to not update'
Example #17
def test_merge_on_multikey_single():
    data = {
        1: create_test_entry_en("test", { 'type': 'great-sword' }),
        2: create_test_entry_en("test", { 'type': 'bow' })
    }

    datamap = DataMap(data, keys_ex=["type"])

    merge_data = [
        { 'name_en': 'test', 'type': 'great-sword', 'attack': 25 },
        { 'name_en': 'test',  'type': 'bow', 'attack': 10 }
    ]
    merge_list(datamap, merge_data, many=False)

    assert datamap.entry_of("en", "test", "great-sword")['attack'] == 25
    assert datamap.entry_of("en", "test", "bow")['attack'] == 10
Example #18
def extend_decoration_chances(decoration_map: DataMap):
    """Calculates the drop tables given the decoration map.

    Each decoration belongs to a drop table (decided by its rarity), and each
    feystone lands on one of the drop tables. Within a table, every decoration
    has an "equal" chance.

    Odds are listed here, with one typo (gleaming is actually glowing).
    https://docs.google.com/spreadsheets/d/1ysj6c2boC6GarFvMah34e6VviZeaoKB6QWovWLSGlsY/htmlview?usp=sharing&sle=true#
    """

    rarity_to_table = {
        5: 'C',
        6: 'B',
        7: 'A',
        8: 'S'
    }

    jewel_to_table_odds = {
        'mysterious': { 'C': 85, 'B': 15, 'A': 0,  'S': 0 },
        'glowing':    { 'C': 65, 'B': 34, 'A': 1,  'S': 0 },
        'worn':       { 'C': 10, 'B': 82, 'A': 6,  'S': 2 },
        'warped':     { 'C': 0,  'B': 77, 'A': 18, 'S': 5 },
    }

    drop_tables = rarity_to_table.values()
    
    # Calculate how many entries there are per drop table type
    table_counts = { table:0 for table in drop_tables }
    for entry in decoration_map.values():
        table = rarity_to_table[entry['rarity']]
        table_counts[table] += 1

    # Create an odds map for each drop table level
    # This maps droptable -> feystone -> probability
    # This is necessary because all decorations are assigned to a droptable
    odds_map = { }
    for table in drop_tables:
        odds_map[table] = {}
        for feystone, feystone_odds in jewel_to_table_odds.items():
            value = Decimal(feystone_odds[table]) / Decimal(table_counts[table])
            odds_map[table][feystone] = value.quantize(Decimal('1.00000'))

    # Assign the odds map for the drop table level to the decoration itself
    for entry in decoration_map.values():
        table_name = rarity_to_table[entry['rarity']]
        entry['chances'] = odds_map[table_name]
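
As a worked instance of the quantized division above (the count here is illustrative, not a real table size): if drop table 'C' held 10 decorations, a 'mysterious' feystone's 85% table chance splits into 8.50000% per decoration.

from decimal import Decimal

per_decoration = (Decimal(85) / Decimal(10)).quantize(Decimal('1.00000'))
assert per_decoration == Decimal('8.50000')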
Example #19
def test_save_split_data_map_symmetric(writer):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, { **basedata[1], 'key': 'f1', 'data': 'test1'})
    extdata.add_entry(2, { **basedata[2], 'key': 'f2', 'data': 'test2'})

    writer.save_split_data_map('split', basedata, extdata, 'key')
    new_data = writer.load_split_data_map(basedata, 'split')

    assert extdata.to_dict() == new_data.to_dict(), "expected data to match"
Example #20
def repair_armor_data():
    data = load_data()

    armor_map = data.armor_map
    armorset_map = data.armorset_map

    new_armor_map = DataMap()

    # Copy all items in armorset order
    for set_entry in armorset_map.values():
        # All armor pieces in the set
        armor_names = [set_entry[part] for part in cfg.armor_parts]
        armor_names = list(filter(None, armor_names))

        armor_lang = set_entry['armor_lang']
        for armor_name in armor_names:
            armor_id = armor_map.id_of(armor_lang, armor_name)
            armor = armor_map.pop(armor_id)
            new_armor_map.insert(armor)

    # Copy over remaining items
    for remaining_item in armor_map:
        new_armor_map.insert(remaining_item)

    # Save results (todo: refactor, move to writer)
    armor_schema = schema.ArmorBaseSchema()
    result_list = new_armor_map.to_list()
    result, errors = armor_schema.dump(result_list, many=True)
    writer.save_csv("armors/armor_base.csv", result)
Example #21
def test_merge_adds_data_under_key():
    # same as the non-key test, but tests that it occurred under the key
    baseData = {
        1: create_test_entry_en('test1'),
        2: create_test_entry_en('test2'),
        3: create_test_entry_en('test3')
    }
    datamap = DataMap(baseData.copy())

    extendedData = {'test1': {'extended': 2}, 'test3': {'extended': 3}}

    datamap.merge(extendedData, key="test")

    assert datamap[1]['test']['extended'] == 2, 'expected data 1 to get extended'
    assert datamap[3]['test']['extended'] == 3, 'expected data 3 to get extended'
    assert 'test' not in datamap[2], 'expected data 2 to not update'
Example #22
def test_can_lookup_entry_by_name():
    map = DataMap()
    map.insert(create_test_entry_en("test1"))
    map.insert(create_test_entry_en("test2"))
    map.insert(create_test_entry_en("test3"))

    entry = map.entry_of("en", "test2")
    assert entry.name('en') == 'test2', "expected entry name to match"
Example #23
def test_can_lookup_id_by_name():
    map = DataMap()
    map.add_entry(1, create_test_entry_en("test1"))
    map.add_entry(2, create_test_entry_en("test2"))
    map.add_entry(3, create_test_entry_en("test3"))

    idval = map.id_of("en", "test2")
    assert idval == 2, "expected test2 to have id 2"
Example #24
def extend_decoration_chances(decoration_map: DataMap):
    """Calculates the drop tables given the decoration map.

    Each decoration belongs to a drop table (decided by its rarity), and each
    feystone lands on one of the drop tables. Within a table, every decoration
    has an "equal" chance.

    Odds are listed here, with one typo (gleaming is actually glowing).
    https://docs.google.com/spreadsheets/d/1ysj6c2boC6GarFvMah34e6VviZeaoKB6QWovWLSGlsY/htmlview?usp=sharing&sle=true#
    """

    jewel_to_table_odds = {}
    droprates = read_csv(
        join(data_path, "decorations/decoration_droprates.csv"))
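    # Each row holds one feystone's percentage for every rarity drop table (columns '5' through '13')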
    for row in droprates:
        entries = {}
        for i in range(5, 14):
            entries[i] = int(row[str(i)] or '0')
        jewel_to_table_odds[row['feystone']] = entries

    # Calculate how many entries there are per drop table type
    table_counts = {table: 0 for table in range(5, 14)}
    for entry in decoration_map.values():
        table_counts[entry['rarity']] += 1

    # Create an odds map for each drop table level
    # This maps droptable -> feystone -> probability
    # This is necessary because all decorations are assigned to a droptable
    odds_map = {}
    for table in range(5, 14):
        odds_map[table] = {}
        for feystone, feystone_odds in jewel_to_table_odds.items():
            count = table_counts[table]
            if count == 0:
                continue
            value = Decimal(feystone_odds[table]) / Decimal(count)
            odds_map[table][feystone] = value.quantize(Decimal('1.00000'))

    # Assign the odds map for the drop table level to the decoration itself
    for entry in decoration_map.values():
        entry['chances'] = odds_map[entry['rarity']]
Example #25
def test_can_lookup_by_id():
    map = DataMap()
    map.add_entry(55, create_test_entry_en("test1"))
    map.add_entry(1, create_test_entry_en("test2"))
    map.add_entry(8, create_test_entry_en("test3"))

    found = map[1]  # note: id order is not sequential
    assert found.name('en') == "test2", "found name should match"
Example #26
def test_merged_names_update_lookup():
    datamap = DataMap({1: create_test_entry({'en': 'NAME EN'})})

    datamap.merge({'NAME EN': {'name': {'es': 'NAME ES'}}})

    assert 'NAME ES' in datamap.names('es'), "Spanish existence check should work"
    assert datamap.entry_of('es', 'NAME ES') is not None, \
        "Name lookup on merged language should work"
Example #27
def copy_skill_descriptions(skill_map: DataMap):
    """Copies the descriptions of certain skill levels to the skill tree.

    Some skill trees are "artificial" and do not exist in the game, therefore they
    have no actual description. This includes skills like Good Luck. Therefore,
    should certain conditions be applied, we reuse the skill detail description.

    The conditions for it to occur are:
    - Missing an english description (missing a translation shouldn't trigger this)
    - Only one available skill level (multi-stage skills are ignored)
    """

    for tree_entry in skill_map.values():
        if tree_entry['description']['en']:
            continue
        if len(tree_entry['levels']) != 1:
            continue
        
        # We don't do a default translation here, since it's handled by another part of the build
        level_entry = tree_entry['levels'][0]
        for language in cfg.supported_languages:
            tree_entry['description'][language] = level_entry['description'][language]
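
For reference, a hypothetical tree entry shape that would qualify for the copy above (illustrative values, not real data): the English description is empty and exactly one level exists.

tree_entry = {
    'description': {'en': ''},  # condition 1: no English description
    'levels': [{'description': {'en': 'single level text'}}],  # condition 2: one level
}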
Example #28
def update_quests(mhdata, item_updater: ItemUpdater,
                  monster_data: MonsterCollection, area_map):
    print('Beginning load of quest binary data')
    quests = load_quests()
    print('Loaded quest binary data')

    quest_data = [
        get_quest_data(q, item_updater, monster_data, area_map) for q in quests
    ]

    quest_by_id = {q.id: q for q in quests}
    quest_data_by_id = {q['id']: q for q in quest_data}

    # test for duplicates first.
    duplicate_candidates = get_quests_with_duplicate_names(quest_by_id)
    for (q1, q2) in duplicate_candidates:
        quest2_data = quest_data_by_id[q2.id]
        if compare_quest_data(quest_data_by_id[q1.id], quest2_data):
            quest_name = q1.name['en']
            print(f'Warning: Quest {quest_name} has exact duplicates.')

    write_quest_raw_data(quests, item_updater.data, monster_data)
    print(
        'Quest artifacts written. Copy ids and names to quest_base.csv to add to build'
    )

    # Merge the quest data
    existing_quest_names = set(q['name_en'] for q in mhdata.quest_map.values())
    quest_new = DataMap(languages=[])
    for raw in quest_data:
        existing_entry = mhdata.quest_map.get(raw['id'])
        if existing_entry:
            existing_entry.update(raw)
        elif raw['name']['en'] not in existing_quest_names:
            quest_new.insert(raw)
    print('Quests merged')

    artifact_writer = create_artifact_writer()

    artifact_writer.save_base_map_csv(
        "quests_new.csv",
        quest_new,
        translation_filename="quest_new_translations.csv",
        translation_extra=['objective', 'description'],
        schema=schema.QuestBaseSchema())
    print(
        'Quest artifact quest_new.csv added. Add any new entries to quest_base.csv'
    )

    writer = create_writer()

    writer.save_base_map_csv(
        "quests/quest_base.csv",
        mhdata.quest_map,
        translation_filename="quests/quest_base_translations.csv",
        translation_extra=['objective', 'description'],
        schema=schema.QuestBaseSchema(),
        key_join='id')

    writer.save_data_csv('quests/quest_monsters.csv',
                         mhdata.quest_map,
                         key='monsters',
                         key_join='id')

    writer.save_data_csv('quests/quest_rewards.csv',
                         mhdata.quest_map,
                         key='rewards',
                         key_join='id')

    print('Quest files updated\n')
Example #29
def update_weapons():
    mhdata = load_data()
    print("Existing Data loaded. Using to update weapon info")

    weapon_loader = WeaponDataLoader()
    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()
    notes_data = load_schema(wep_wsl.WepWsl,
                             "common/equip/wep_whistle.wep_wsl")
    sharpness_reader = SharpnessDataReader()
    ammo_reader = WeaponAmmoLoader()
    coating_data = load_schema(bbtbl.Bbtbl, "common/equip/bottle_table.bbtbl")

    print("Loaded initial weapon binary data data")

    def bind_weapon_blade_ext(weapon_type: str, existing_entry,
                              binary: wp_dat.WpDatEntry):
        for key in [
                'kinsect_bonus', 'phial', 'phial_power', 'shelling',
                'shelling_level', 'notes'
        ]:
            existing_entry[key] = None
        if weapon_type == cfg.CHARGE_BLADE:
            existing_entry['phial'] = cb_phials[binary.wep1_id]
        if weapon_type == cfg.SWITCH_AXE:
            (phial, power) = s_axe_phials[binary.wep1_id]
            existing_entry['phial'] = phial
            existing_entry['phial_power'] = power
        if weapon_type == cfg.GUNLANCE:
            # first 5 are normals, second 5 are wide, third 5 are long
            shelling = ['normal', 'wide', 'long'][binary.wep1_id // 5]
            level = (binary.wep1_id % 5) + 1
            existing_entry['shelling'] = shelling
            existing_entry['shelling_level'] = level
        if weapon_type == cfg.INSECT_GLAIVE:
            existing_entry['kinsect_bonus'] = glaive_boosts[binary.wep1_id]
        if weapon_type == cfg.HUNTING_HORN:
            note_entry = notes_data[binary.wep1_id]
            notes = [note_entry.note1, note_entry.note2, note_entry.note3]
            notes = [str(note_colors[n]) for n in notes]
            existing_entry['notes'] = "".join(notes)

    # Store new weapon entries
    new_weapon_map = DataMap(languages="en",
                             start_id=mhdata.weapon_map.max_id + 1)

    # Iterate over weapon types
    for weapon_type in cfg.weapon_types:
        print(f"Processing {weapon_type}")

        # Note: weapon data ordering is unknown; the order field and ascending tree_id are sometimes wrong.
        # Therefore it's treated as unsorted, and we work off the spreadsheet order.
        weapon_tree = weapon_loader.load_tree(weapon_type)
        print(f"Loaded {weapon_type} weapon tree binary data")

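        # Display attack is the binary's internal raw damage scaled by the per-weapon-type multiplier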
        multiplier = cfg.weapon_multiplier[weapon_type]

        # Iterate over nodes in the weapon tree (does depth first search)
        for weapon_node in weapon_tree:
            binary = weapon_node.binary
            name = weapon_node.name
            existing_entry = mhdata.weapon_map.entry_of('en', name['en'])

            new_entry = {}
            if existing_entry:
                new_entry = {**existing_entry}

            # Bind name and parent
            new_entry['name'] = name
            new_entry['weapon_type'] = weapon_type
            new_entry['previous_en'] = None
            if weapon_node.parent is not None:
                new_entry['previous_en'] = weapon_node.parent.name['en']

            # Bind info
            new_entry['weapon_type'] = weapon_type
            new_entry['rarity'] = binary.rarity + 1
            new_entry['attack'] = binary.raw_damage * multiplier
            new_entry['affinity'] = binary.affinity
            new_entry['defense'] = binary.defense or None
            new_entry['slot_1'] = binary.gem_slot1_lvl
            new_entry['slot_2'] = binary.gem_slot2_lvl
            new_entry['slot_3'] = binary.gem_slot3_lvl
            new_entry['elderseal'] = elderseal[binary.elderseal]

            # Bind Elements
            if name['en'] in ["Twin Nails", "Fire and Ice"]:
                print(f"Skipping {name['en']} element data")
            else:
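                # A nonzero hidden element id marks the element as hidden; read the hidden fields instead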
                hidden = binary.hidden_element_id != 0
                element_id = binary.hidden_element_id if hidden else binary.element_id
                element_atk = binary.hidden_element_damage if hidden else binary.element_damage

                new_entry['element_hidden'] = hidden
                new_entry['element1'] = elements[element_id]
                new_entry['element1_attack'] = element_atk * 10 if element_atk else None
                new_entry['element2'] = None
                new_entry['element2_attack'] = None

            # Bind skill
            skill = skill_text_handler.get_skilltree_name(binary.skill_id)
            new_entry['skill'] = skill['en'] if binary.skill_id != 0 else None

            # Bind Extras (Blade/Gun/Bow data)
            if weapon_type in cfg.weapon_types_melee:
                bind_weapon_blade_ext(weapon_type, new_entry, binary)
                new_entry['sharpness'] = sharpness_reader.sharpness_for(binary)
            elif weapon_type in cfg.weapon_types_gun:
                (ammo_name, ammo_data) = ammo_reader.create_data_for(
                    wtype=weapon_type,
                    tree=weapon_node.tree,
                    binary=weapon_node.binary)
                new_entry['ammo_config'] = ammo_name
            else:
                # TODO: Bows have an Enabled+ flag. Find out what it means
                # 1 = enabled, 2 = enabled+
                coating_binary = coating_data[binary.special_ammo_type]
                new_entry['bow'] = {
                    'close': coating_binary.close_range > 0,
                    'power': coating_binary.power > 0,
                    'paralysis': coating_binary.paralysis > 0,
                    'poison': coating_binary.poison > 0,
                    'sleep': coating_binary.sleep > 0,
                    'blast': coating_binary.blast > 0
                }

            # crafting data
            new_entry['craft'] = []
            if weapon_node.craft:
                new_entry['craft'].append({
                    'type': 'Create',
                    **convert_recipe(item_text_handler, weapon_node.craft)
                })
            if weapon_node.upgrade:
                new_entry['craft'].append({
                    'type': 'Upgrade',
                    **convert_recipe(item_text_handler, weapon_node.upgrade)
                })

            new_weapon_map.insert(new_entry)

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "weapons/weapon_base.csv",
        new_weapon_map,
        schema=schema.WeaponBaseSchema(),
        translation_filename="weapons/weapon_base_translations.csv")

    writer.save_data_csv("weapons/weapon_sharpness.csv",
                         new_weapon_map,
                         key="sharpness",
                         schema=schema.WeaponSharpnessSchema())

    writer.save_data_csv("weapons/weapon_bow_ext.csv",
                         new_weapon_map,
                         key="bow",
                         schema=schema.WeaponBowSchema())

    writer.save_data_csv("weapons/weapon_craft.csv",
                         new_weapon_map,
                         key="craft",
                         schema=schema.WeaponCraftSchema())

    writer.save_keymap_csv("weapons/weapon_ammo.csv",
                           ammo_reader.data,
                           schema=schema.WeaponAmmoSchema())

    print("Weapon files updated\n")

    add_missing_items(item_text_handler.encountered, mhdata=mhdata)
Example #30
def update_items(item_updater: ItemUpdater, *, mhdata=None):
    if not mhdata:
        mhdata = load_data()
        print("Existing Data loaded. Using to expand item list")

    new_item_map = DataMap(languages='en', start_id=mhdata.item_map.max_id + 1)
    unlinked_item_names = OrderedSet()

    # used to track dupes to throw proper errors
    updated_names = set()

    # First pass. Iterate over existing in-game items and merge with existing data
    for entry in item_updater.item_data:
        name_dict, description_dict = item_updater.name_and_description_for(
            entry.id, track=False)
        existing_item = mhdata.item_map.entry_of('en', name_dict['en'])

        is_encountered = entry.id in item_updater.encountered_item_ids
        if not is_encountered and not existing_item:
            unlinked_item_names.add(name_dict['en'])
            continue

        if name_dict['en'] in updated_names:
            raise Exception(f"Duplicate item {name_dict['en']}")
        updated_names.add(name_dict['en'])

        # note: we omit buy price as items may have a buy price even if not sold.
        # We only care about the buy price of BUYABLE items
        new_data = {
            'name': name_dict,
            'description': description_dict,
            'rarity': entry.rarity + 1,
            'sell_price': None,
            'points': None
        }

        is_ez = entry.flags.ez
        is_account = entry.type == 'endemic'
        is_tradein = "(Trade-in Item)" in description_dict['en']
        is_appraisal = entry.flags.appraisal

        sell_value = entry.sell_price if entry.sell_price != 0 else None
        if is_account:
            new_data['points'] = sell_value
        else:
            new_data['sell_price'] = sell_value

        if name_dict['en'] == 'Normal Ammo 1':
            new_data['category'] = 'hidden'
        elif is_ez:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'supply'
        elif is_account:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'account'
        elif is_appraisal or ('Appraised after investigation'
                              in description_dict['en']):
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'appraisal'
            new_data['sell_price'] = None  # why does this have values?
        else:
            new_data['category'] = entry.type
            new_data['subcategory'] = 'trade' if is_tradein else None

            # Whether we show carry limit at all is based on item type.
            # Materials are basically infinite carry
            infinite_carry = new_data['category'] == 'material'
            new_data['carry_limit'] = None if infinite_carry else entry.carry_limit

        if existing_item:
            new_item_map.insert({**existing_item, **new_data})
        else:
            new_item_map.insert(new_data)

    # Second pass, add old entries that are not in the new one
    for old_entry in mhdata.item_map.values():
        if old_entry.name('en') not in new_item_map.names('en'):
            new_item_map.insert(old_entry)

    # Third pass. Items need to be reordered based on type

    unsorted_item_map = new_item_map  # store reference to former map

    def filter_category(category, subcategory=None):
        "helper that returns items and then removes from unsorted item map"
        results = []
        for item in unsorted_item_map.values():
            if (item['category'] == category
                    and item['subcategory'] == subcategory):
                results.append(item)
        for result in results:
            del unsorted_item_map[result.id]
        return results

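    # 'Normal Ammo 1' was categorized 'hidden' above, so filter_category('ammo') won't return it; pin it manually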
    normal_ammo_1 = unsorted_item_map.entry_of("en", "Normal Ammo 1")

    # start the aforementioned third pass by creating a new map based on the old one
    new_item_map = DataMap(languages="en")
    new_item_map.extend(filter_category('item'))
    new_item_map.extend(filter_category('material'))
    new_item_map.extend(filter_category('material', 'trade'))
    if normal_ammo_1:
        new_item_map.insert(normal_ammo_1)
    new_item_map.extend(filter_category('ammo'))
    new_item_map.extend(filter_category('misc', 'appraisal'))
    new_item_map.extend(filter_category('misc', 'account'))
    new_item_map.extend(filter_category('misc', 'supply'))

    # Write out data
    writer = create_writer()

    writer.save_base_map_csv(
        "items/item_base.csv",
        new_item_map,
        schema=schema.ItemSchema(),
        translation_filename="items/item_base_translations.csv",
        translation_extra=['description'])

    # Write out artifact data
    print("Writing unlinked item names to artifacts")
    artifacts.write_names_artifact('items_unlinked.txt', unlinked_item_names)
    print("Writing all items and ids")
    artifact_data = [{
        'id': i.id,
        'name': i.name['en']
    } for i in item_updater.data]
    artifacts.write_dicts_artifact('items_ids.csv', artifact_data)

    print("Item files updated")