コード例 #1
0
def test_save_split_data_map_symmetric(writer):
    """Data written by save_split_data_map loads back unchanged."""
    basedata = DataMap()
    for entry_id, entry_name in ((1, 'test1'), (2, 'test2')):
        basedata.add_entry(entry_id, create_entry_en(entry_name))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'key': 'f1', 'data': 'test1'})
    extdata.add_entry(2, {**basedata[2], 'key': 'f2', 'data': 'test2'})

    writer.save_split_data_map('split', basedata, extdata, 'key')
    reloaded = writer.load_split_data_map(basedata, 'split')

    assert extdata.to_dict() == reloaded.to_dict(), "expected data to match"
コード例 #2
0
def test_save_data_json_symmetric(writer):
    """Data written by save_data_json loads back unchanged."""
    basedata = DataMap()
    for entry_id, entry_name in ((1, 'test1'), (2, 'test2')):
        basedata.add_entry(entry_id, create_entry_en(entry_name))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': 'test1'})
    extdata.add_entry(2, {**basedata[2], 'data': 'test2'})

    writer.save_data_json('testdatasym.json', extdata, fields=['data'])

    reloaded = writer.load_data_json(basedata.copy(), 'testdatasym.json')

    assert extdata.to_dict() == reloaded.to_dict(), "expected data to match"
コード例 #3
0
def repair_armor_data():
    """Rewrite armor_base.csv so armor rows follow armor-set ordering.

    Armor pieces referenced by each armor set come first (in set order);
    any armor not referenced by a set is appended afterwards.
    """
    data = load_data()

    armor_map = data.armor_map
    armorset_map = data.armorset_map

    new_armor_map = DataMap()

    # Copy all items in armorset order
    for set_entry in armorset_map.values():
        # All armor pieces in the set
        armor_names = [set_entry[part] for part in cfg.armor_parts]
        armor_names = list(filter(None, armor_names))  # drop empty/None slots

        armor_lang = set_entry['armor_lang']
        for armor_name in armor_names:
            armor_id = armor_map.id_of(armor_lang, armor_name)
            # pop() removes the piece from the source map, so the leftover
            # pass below only sees armor no set referenced
            armor = armor_map.pop(armor_id)
            new_armor_map.insert(armor)

    # Copy over remaining items
    for remaining_item in armor_map:
        new_armor_map.insert(remaining_item)

    # Save results (todo: refactor, move to writer)
    # NOTE(review): schema.dump errors are captured but not checked here —
    # presumably dump failures are silent; confirm this is intended.
    armor_schema = schema.ArmorBaseSchema()
    result_list = new_armor_map.to_list()
    result, errors = armor_schema.dump(result_list, many=True)
    writer.save_csv("armors/armor_base.csv", result)
コード例 #4
0
ファイル: tools.py プロジェクト: vintyr/MHWorldData
def update_tools(mhdata):
    """Rebuild tool CSV files from binary tool data, merged with the
    existing entries in mhdata so manual edits survive."""
    tool_data = ToolCollection()

    new_tools = DataMap(start_id=mhdata.tool_map.max_id + 1)
    for tool in tool_data.tools:
        upgraded_name_en = tool.name_upgraded['en']
        existing_entry = mhdata.tool_map.entry_of('en', upgraded_name_en)

        # Seed from the existing entry (if any) to keep manual data.
        new_entry = {**existing_entry} if existing_entry else {}

        new_entry['name'] = tool.name_upgraded
        new_entry['name_base'] = tool.name
        new_entry['description'] = tool.description
        is_booster = 'booster' in tool.name['en'].lower()
        new_entry['tool_type'] = 'booster' if is_booster else 'mantle'
        new_entry.setdefault('duration', 0)
        new_entry.setdefault('duration_upgraded', None)
        new_entry.setdefault('recharge', 0)
        new_entry['slot_1'] = tool.slots[0]
        new_entry['slot_2'] = tool.slots[1]
        new_entry['slot_3'] = tool.slots[2]
        new_entry.setdefault('icon_color', None)

        new_tools.insert(new_entry)

    writer = create_writer()

    writer.save_base_map_csv(
        "tools/tool_base.csv",
        new_tools,
        schema=schema.ToolSchema(),
        translation_filename="tools/tool_base_translations.csv",
        translation_extra=['name_base', 'description'])
コード例 #5
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_manual_id_resets_sequence():
    """After a manual-id entry, auto-generated ids continue past it."""
    datamap = DataMap()
    datamap.add_entry(25, create_test_entry_en('test1'))

    inserted = datamap.insert(create_test_entry_en('test2'))
    assert inserted.id > 25, "new id should have been higher"
コード例 #6
0
ファイル: weapons.py プロジェクト: mtfarkas/MHWorldData
def update_kinsects(mhdata, item_updater: ItemUpdater):
    """Regenerate kinsect CSV files from the kinsect binary tree.

    Writes debugging artifacts (all crafted kinsects and the tree roots),
    builds a fresh DataMap of crafted kinsects, and saves the base and
    craft-recipe CSVs.
    """
    print('Loading kinsect info')
    kinsect_tree = load_kinsect_tree()

    def resolve_parent_name(entry):
        # English name of the parent kinsect, or '' for tree roots.
        if entry.parent:
            return entry.parent.name['en']
        return ''

    # Artifact: one "id,name,parent" line per crafted kinsect.
    items = [
        f"{r.id},{r.name['en']},{resolve_parent_name(r)}"
        for r in kinsect_tree.crafted()
    ]
    artifacts.write_artifact('kinsect_all.txt', *items)
    items = [f"{r.id},{r.name['en']}" for r in kinsect_tree.roots]
    artifacts.write_artifact('kinsect_roots.txt', *items)

    kinsect_map = DataMap(languages=['en'])
    for kinsect_node in kinsect_tree.crafted():
        binary = kinsect_node.binary
        # note: binary ids/rarities appear to be 0-based; exported values
        # are shifted to 1-based — TODO confirm against the binary format
        new_entry = kinsect_map.insert({
            'id':
            binary.id + 1,
            'name':
            kinsect_node.name,
            'previous_en':
            resolve_parent_name(kinsect_node),
            'rarity':
            binary.rarity + 1,
            'attack_type':
            kinsect_attack_types[binary.attack_type],
            'dust_effect':
            kinsect_dusts[binary.dust_type],
            'power':
            binary.power,
            'speed':
            binary.speed,
            'heal':
            binary.heal
        })

        # Upgrade recipe (if any) is stored under the 'craft' key.
        if kinsect_node.upgrade:
            new_entry['craft'] = convert_recipe(item_updater,
                                                kinsect_node.upgrade)

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "weapons/kinsect_base.csv",
        kinsect_map,
        schema=schema.KinsectBaseSchema(),
        translation_filename="weapons/kinsect_base_translations.csv")

    writer.save_data_csv("weapons/kinsect_craft_ext.csv",
                         kinsect_map,
                         key="craft",
                         schema=schema.RecipeSchema())

    print("Kinsect files updated\n")
コード例 #7
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_can_lookup_id_by_name():
    """id_of returns the id of the entry whose name matches in a language."""
    datamap = DataMap()
    datamap.add_entry(1, create_test_entry_en("test1"))
    datamap.add_entry(2, create_test_entry_en("test2"))
    datamap.add_entry(3, create_test_entry_en("test3"))

    idval = datamap.id_of("en", "test2")
    # Fixed assertion message: "test2" was registered under id 2, but the
    # old message claimed it should have id 1.
    assert idval == 2, "expected test2 to have id 2"
コード例 #8
0
ファイル: test_DataMap.py プロジェクト: slimlime/MHWorldData
def test_merges_names():
    """merge() adds name translations without clobbering existing ones."""
    datamap = DataMap({1: create_test_entry({'en': 'NAME EN'})})

    datamap.merge({'NAME EN': {'name': {'es': 'NAME ES'}}})

    names = datamap[1]['name']
    assert names['en'] == 'NAME EN', 'kept old name'
    assert 'es' in names, 'Added new spanish name'
    assert names['es'] == 'NAME ES', 'spanish name is correct'
コード例 #9
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_can_lookup_entry_by_name():
    """entry_of returns the entry matching a name in the given language."""
    datamap = DataMap()
    for label in ("test1", "test2", "test3"):
        datamap.insert(create_test_entry_en(label))

    entry = datamap.entry_of("en", "test2")
    assert entry.name('en') == 'test2', "expected entry name to match"
コード例 #10
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_can_lookup_by_id():
    """Indexing a DataMap by id works regardless of insertion order."""
    datamap = DataMap()
    datamap.add_entry(55, create_test_entry_en("test1"))
    datamap.add_entry(1, create_test_entry_en("test2"))
    datamap.add_entry(8, create_test_entry_en("test3"))

    # note: id order is not sequential
    result = datamap[1]
    assert result.name('en') == "test2", "found name should match"
コード例 #11
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_can_iterate_values_in_order():
    """values() yields entries in insertion order."""
    expected_names = ['test1', 'test2', 'test3']

    datamap = DataMap()
    for entry_id, entry_name in enumerate(expected_names):
        datamap.add_entry(entry_id, create_test_entry_en(entry_name))

    actual = [entry['name']['en'] for entry in datamap.values()]
    assert actual == expected_names, "Expected map entries to match"
コード例 #12
0
ファイル: test_DataMap.py プロジェクト: slimlime/MHWorldData
def test_merged_names_update_lookup():
    """Merging a name in a new language makes that name usable for lookups."""
    datamap = DataMap({1: create_test_entry({'en': 'NAME EN'})})

    datamap.merge({'NAME EN': {'name': {'es': 'NAME ES'}}})

    assert 'NAME ES' in datamap.names(
        'es'), "Spanish existance check should work"
    # Fixed: compare against None with "is not" rather than "!=" (PEP 8);
    # equality against None can be overridden by __eq__.
    assert datamap.entry_of(
        'es', 'NAME ES') is not None, "Name lookup on merged language should work"
コード例 #13
0
def test_save_base_symmetric(writer):
    """save_base_map output round-trips through load_base_json."""
    data = DataMap()
    for entry_id, entry_name in ((1, 'test1'), (2, 'test2')):
        data.add_entry(entry_id, create_entry_en(entry_name))

    writer.save_base_map('testbase.json', data)
    reloaded = writer.load_base_json('testbase.json', languages)

    assert dict(data) == dict(reloaded), "saved data didn't match"
コード例 #14
0
def test_save_base_csv_symmetric(writer: DataReaderWriter):
    """save_base_map_csv output round-trips through load_base_csv."""
    data = DataMap()
    for entry_name in ('test1', 'test2'):
        data.insert(create_entry_en(entry_name))

    groups = ['name', 'description']
    writer.save_base_map_csv('testbase.csv', data, groups=groups)
    reloaded = writer.load_base_csv('testbase.csv', groups=groups)

    assert data.to_list() == reloaded.to_list(), "saved data didn't match"
コード例 #15
0
def test_clone_returns_equal_map():
    """copy() yields an equal but distinct DataMap object."""
    data = {
        25: create_test_entry_en('test1', {'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', {'somedata': {'alsonested': 'hey'}})
    }

    datamap = DataMap(data)
    cloned_datamap = datamap.copy()

    assert datamap.to_dict() == cloned_datamap.to_dict(), "expected clone to match"
    # Fixed: use the identity operator instead of comparing id() values.
    assert datamap is not cloned_datamap, "expecting clone to be a different object"
コード例 #16
0
def test_save_base_csv_symmetric(writer: DataReaderWriter):
    """CSV round trip preserves data.

    Note: CSVs do not save typing info, so everything is strings.
    """
    data = DataMap()
    for entry_name, entry_id in (('test1', '1'), ('test2', '2')):
        data.insert(create_entry_en(entry_name, {'id': entry_id}))

    groups = ['name', 'description']
    writer.save_base_map_csv('testbase.csv', data, groups=groups)
    reloaded = writer.load_base_csv('testbase.csv', languages, groups=groups)

    assert data.to_list() == reloaded.to_list(), "saved data didn't match"
コード例 #17
0
def test_to_dict_correct_data():
    """to_dict() reproduces entries inserted with explicit ids."""
    data = {
        25: create_test_entry_en('test1', {'id': 25, 'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', {'id': 28, 'somedata': {'alsonested': 'hey'}}),
    }

    datamap = DataMap()
    for entry in data.values():
        datamap.insert(entry)

    assert datamap.to_dict() == data, "expected serialized data to match original data"
コード例 #18
0
ファイル: test_DataMap.py プロジェクト: kevinn/MHWorldData
def test_can_iterate_values_in_order():
    """items() yields (id, entry) pairs in insertion order."""
    expected_entries = [
        (1, create_test_entry_en('test1')),
        (2, create_test_entry_en("test2")),
        (3, create_test_entry_en("test3")),
    ]

    datamap = DataMap()
    for entry_id, entry in expected_entries:
        datamap.add_entry(entry_id, entry)

    actual = list(datamap.items())
    assert actual == expected_entries, "Expected map entries to match"
コード例 #19
0
ファイル: test_DataMap.py プロジェクト: kevinn/MHWorldData
def test_to_dict_correct_data():
    """to_dict() reproduces the entries added under their ids."""
    data = {
        25: create_test_entry_en('test1', {'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', {'somedata': {'alsonested': 'hey'}}),
    }

    datamap = DataMap()
    for entry_id, entry in data.items():
        datamap.add_entry(entry_id, entry)

    assert datamap.to_dict() == data, "expected serialized data to match original data"
コード例 #20
0
def transform_dmap(dmap: DataMap, obj_schema):
    """Return a new DataMap whose entries were deserialized through the
    given marshmallow schema, keyed by the original entry ids.

    Raises Exception if the schema reports any validation errors.
    """
    transformed = DataMap()
    for key, entry in dmap.items():
        converted, errors = obj_schema.load(entry.to_dict(), many=False)
        if errors:
            raise Exception(str(errors))
        transformed.add_entry(key, converted)
    return transformed
コード例 #21
0
def test_save_data_csv_symmetric_listmode(writer: DataReaderWriter):
    """save_data_csv round-trips list-valued sub-data via load_data_csv."""
    basedata = DataMap()
    for entry_id, entry_name in ((1, 'test1'), (2, 'test2')):
        basedata.add_entry(entry_id, create_entry_en(entry_name))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': [{'data': 'test1'}]})
    extdata.add_entry(2, {
        **basedata[2],
        'data': [{'data': 'test2'}, {'data': 'test2ext'}],
    })

    writer.save_data_csv('testdatasym.csv', extdata, key='data')
    reloaded = writer.load_data_csv(basedata.copy(),
                                    'testdatasym.csv',
                                    key='data',
                                    leaftype="list")

    assert extdata.to_dict() == reloaded.to_dict(), "expected data to match"
コード例 #22
0
ファイル: test_Saving.py プロジェクト: vintyr/MHWorldData
def test_save_data_csv_symmetric_listmode(writer: DataReaderWriter):
    """DataStitcher rebuilds list-mode CSV data written by save_data_csv.

    Fixed: removed the unused debug locals ``old_data`` and ``abc``.
    """
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': [{'a': 'test1'}]})
    extdata.add_entry(2, {
        **basedata[2],
        'data': [{'a': 'test2'}, {'a': 'test2ext'}],
    })

    writer.save_data_csv('testdatasym.csv', extdata, key='data')
    new_data = (DataStitcher(writer)
                .use_base(basedata.copy())
                .add_csv('testdatasym.csv', key='data')
                .get())

    assert extdata.to_dict() == new_data.to_dict(), "expected data to match"
コード例 #23
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_row_add_value_in_middle():
    """set_value(..., after=key) places the new key right after `after`."""
    test_keys = ['id', 'test1', 'test2', 'test3']
    test_dict = dict.fromkeys(test_keys, 1)
    test_dict['name'] = {'en': 'a test'}  # required field

    datamap = DataMap()
    entry = datamap.insert(test_dict)

    entry.set_value('NEW', 1, after='test2')

    # note: name exists because it was manually added to test_dict
    expected_keys = ['id', 'test1', 'test2', 'NEW', 'test3', 'name']
    assert list(entry.keys()) == expected_keys, "Expected new to be after test2"
コード例 #24
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_merge_adds_data():
    """merge() attaches extra fields only to the matching entries."""
    baseData = {
        1: create_test_entry_en('test1'),
        2: create_test_entry_en('test2'),
        3: create_test_entry_en('test3'),
    }
    datamap = DataMap(baseData.copy())

    datamap.merge({'test1': {'extended': 2}, 'test3': {'extended': 3}})

    assert datamap[1]['extended'] == 2, 'expected data 1 to get extended'
    assert datamap[3]['extended'] == 3, 'expected data 3 to get extended'
    assert 'extended' not in datamap[2], 'expected data 2 to not update'
コード例 #25
0
def test_merge_on_multikey_single():
    """merge_list matches entries by name plus the extra 'type' key."""
    entries = {
        1: create_test_entry_en("test", {'type': 'great-sword'}),
        2: create_test_entry_en("test", {'type': 'bow'}),
    }
    datamap = DataMap(entries, keys_ex=["type"])

    merge_data = [
        {'name_en': 'test', 'type': 'great-sword', 'attack': 25},
        {'name_en': 'test', 'type': 'bow', 'attack': 10},
    ]
    merge_list(datamap, merge_data, many=False)

    assert datamap.entry_of("en", "test", "great-sword")['attack'] == 25
    assert datamap.entry_of("en", "test", "bow")['attack'] == 10
コード例 #26
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_merge_adds_data_under_key():
    """Like the non-key merge test, but merged fields land under a sub-key."""
    baseData = {
        1: create_test_entry_en('test1'),
        2: create_test_entry_en('test2'),
        3: create_test_entry_en('test3'),
    }
    datamap = DataMap(baseData.copy())

    datamap.merge({'test1': {'extended': 2}, 'test3': {'extended': 3}},
                  key="test")

    assert datamap[1]['test']['extended'] == 2, 'expected data 1 to get extended'
    assert datamap[3]['test']['extended'] == 3, 'expected data 3 to get extended'
    assert 'test' not in datamap[2], 'expected data 2 to not update'
コード例 #27
0
ファイル: quests.py プロジェクト: vintyr/MHWorldData
def update_quests(mhdata, item_updater: ItemUpdater,
                  monster_data: MonsterCollection, area_map):
    """Refresh quest CSV files from quest binary data.

    Loads quest binaries, warns about exact duplicate quests, writes raw
    quest artifacts, collects brand-new quests into an artifact CSV for
    manual review, and re-saves the existing quest maps.
    """
    print('Beginning load of quest binary data')
    quests = load_quests()
    print('Loaded quest binary data')

    quest_data = [
        get_quest_data(q, item_updater, monster_data, area_map) for q in quests
    ]

    # Parallel indexes: raw binaries by id, converted dicts by id.
    quest_by_id = {q.id: q for q in quests}
    quest_data_by_id = {q['id']: q for q in quest_data}

    # test for duplicates first.
    duplicate_candidates = get_quests_with_duplicate_names(quest_by_id)
    for (q1, q2) in duplicate_candidates:
        quest2_data = quest_data_by_id[q2.id]
        if compare_quest_data(quest_data_by_id[q1.id], quest2_data):
            quest_name = q1.name['en']
            print(f'Warning: Quest {quest_name} has exact duplicates.')

    write_quest_raw_data(quests, item_updater.data, monster_data)
    print(
        'Quest artifacts written. Copy ids and names to quest_base.csv to add to build'
    )

    # Merge the quest data
    # Existing quests (matched by id) are updated in place; quests whose id
    # AND english name are both new go to quest_new for manual addition.
    existing_quest_names = set(q['name_en'] for q in mhdata.quest_map.values())
    quest_new = DataMap(languages=[])
    for raw in quest_data:
        existing_entry = mhdata.quest_map.get(raw['id'])
        if existing_entry:
            existing_entry.update(raw)
        elif raw['name']['en'] not in existing_quest_names:
            quest_new.insert(raw)
    print('Quests merged')

    artifact_writer = create_artifact_writer()

    artifact_writer.save_base_map_csv(
        "quests_new.csv",
        quest_new,
        translation_filename="quest_new_translations.csv",
        translation_extra=['objective', 'description'],
        schema=schema.QuestBaseSchema())
    print(
        'Quest artifact quest_new.csv added. Add any new entries to quest_base.csv'
    )

    writer = create_writer()

    writer.save_base_map_csv(
        "quests/quest_base.csv",
        mhdata.quest_map,
        translation_filename="quests/quest_base_translations.csv",
        translation_extra=['objective', 'description'],
        schema=schema.QuestBaseSchema(),
        key_join='id')

    writer.save_data_csv('quests/quest_monsters.csv',
                         mhdata.quest_map,
                         key='monsters',
                         key_join='id')

    writer.save_data_csv('quests/quest_rewards.csv',
                         mhdata.quest_map,
                         key='rewards',
                         key_join='id')

    print('Quest files updated\n')
コード例 #28
0
def update_weapons():
    """Rebuild all weapon CSV files from weapon binary data.

    Walks every weapon tree, merges binary stats into any existing entry
    with the same English name, binds type-specific extras (phials,
    shelling, notes, sharpness, ammo, bow coatings) and crafting recipes,
    then saves the base/sharpness/bow/craft/ammo CSVs.
    """
    mhdata = load_data()
    print("Existing Data loaded. Using to update weapon info")

    weapon_loader = WeaponDataLoader()
    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()
    notes_data = load_schema(wep_wsl.WepWsl,
                             "common/equip/wep_whistle.wep_wsl")
    sharpness_reader = SharpnessDataReader()
    ammo_reader = WeaponAmmoLoader()
    coating_data = load_schema(bbtbl.Bbtbl, "common/equip/bottle_table.bbtbl")

    print("Loaded initial weapon binary data data")

    def bind_weapon_blade_ext(weapon_type: str, existing_entry,
                              binary: wp_dat.WpDatEntry):
        """Populate melee-only fields on existing_entry from the binary.

        All melee extras are reset to None first so stale values never
        survive; only the fields relevant to weapon_type are then set.
        """
        for key in [
                'kinsect_bonus', 'phial', 'phial_power', 'shelling',
                'shelling_level', 'notes'
        ]:
            existing_entry[key] = None
        if weapon_type == cfg.CHARGE_BLADE:
            existing_entry['phial'] = cb_phials[binary.wep1_id]
        if weapon_type == cfg.SWITCH_AXE:
            (phial, power) = s_axe_phials[binary.wep1_id]
            existing_entry['phial'] = phial
            existing_entry['phial_power'] = power
        if weapon_type == cfg.GUNLANCE:
            # first 5 are normals, second 5 are wide, third 5 are long
            shelling = ['normal', 'wide', 'long'][binary.wep1_id // 5]
            level = (binary.wep1_id % 5) + 1
            existing_entry['shelling'] = shelling
            existing_entry['shelling_level'] = level
        if weapon_type == cfg.INSECT_GLAIVE:
            existing_entry['kinsect_bonus'] = glaive_boosts[binary.wep1_id]
        if weapon_type == cfg.HUNTING_HORN:
            # Hunting horn note colors are encoded as three note ids.
            note_entry = notes_data[binary.wep1_id]
            notes = [note_entry.note1, note_entry.note2, note_entry.note3]
            notes = [str(note_colors[n]) for n in notes]
            existing_entry['notes'] = "".join(notes)

    # Store new weapon entries
    new_weapon_map = DataMap(languages="en",
                             start_id=mhdata.weapon_map.max_id + 1)

    # Iterate over weapon types
    for weapon_type in cfg.weapon_types:
        print(f"Processing {weapon_type}")

        # Note: weapon data ordering is unknown. order field and tree_id asc are sometimes wrong
        # Therefore its unsorted, we have to work off the spreadsheet order
        weapon_tree = weapon_loader.load_tree(weapon_type)
        print(f"Loaded {weapon_type} weapon tree binary data")

        multiplier = cfg.weapon_multiplier[weapon_type]

        # Iterate over nodes in the weapon tree (does depth first search)
        for weapon_node in weapon_tree:
            binary = weapon_node.binary
            name = weapon_node.name
            existing_entry = mhdata.weapon_map.entry_of('en', name['en'])

            # Seed from the existing entry (if any) so manual data survives.
            new_entry = {}
            if existing_entry:
                new_entry = {**existing_entry}

            # Bind name and parent
            new_entry['name'] = name
            new_entry['weapon_type'] = weapon_type
            new_entry['previous_en'] = None
            if weapon_node.parent != None:
                new_entry['previous_en'] = weapon_node.parent.name['en']

            # Bind info
            new_entry['weapon_type'] = weapon_type
            new_entry['rarity'] = binary.rarity + 1
            new_entry['attack'] = binary.raw_damage * multiplier
            new_entry['affinity'] = binary.affinity
            new_entry['defense'] = binary.defense or None
            new_entry['slot_1'] = binary.gem_slot1_lvl
            new_entry['slot_2'] = binary.gem_slot2_lvl
            new_entry['slot_3'] = binary.gem_slot3_lvl
            new_entry['elderseal'] = elderseal[binary.elderseal]

            # Bind Elements
            # Hidden elements use a separate id/damage pair in the binary.
            if name['en'] in ["Twin Nails", "Fire and Ice"]:
                print(f"Skipping {name['en']} element data")
            else:
                hidden = binary.hidden_element_id != 0
                element_id = binary.hidden_element_id if hidden else binary.element_id
                element_atk = binary.hidden_element_damage if hidden else binary.element_damage

                new_entry['element_hidden'] = hidden
                new_entry['element1'] = elements[element_id]
                new_entry[
                    'element1_attack'] = element_atk * 10 if element_atk else None
                new_entry['element2'] = None
                new_entry['element2_attack'] = None

            # Bind skill
            skill = skill_text_handler.get_skilltree_name(binary.skill_id)
            new_entry['skill'] = skill['en'] if binary.skill_id != 0 else None

            # Bind Extras (Blade/Gun/Bow data)
            if weapon_type in cfg.weapon_types_melee:
                bind_weapon_blade_ext(weapon_type, new_entry, binary)
                new_entry['sharpness'] = sharpness_reader.sharpness_for(binary)
            elif weapon_type in cfg.weapon_types_gun:
                (ammo_name, ammo_data) = ammo_reader.create_data_for(
                    wtype=weapon_type,
                    tree=weapon_node.tree,
                    binary=weapon_node.binary)
                new_entry['ammo_config'] = ammo_name
            else:
                # TODO: Bows have an Enabled+ flag. Find out what it means
                # 1 = enabled, 2 = enabled+
                coating_binary = coating_data[binary.special_ammo_type]
                new_entry['bow'] = {
                    'close': coating_binary.close_range > 0,
                    'power': coating_binary.power > 0,
                    'paralysis': coating_binary.paralysis > 0,
                    'poison': coating_binary.poison > 0,
                    'sleep': coating_binary.sleep > 0,
                    'blast': coating_binary.blast > 0
                }

            # crafting data
            new_entry['craft'] = []
            if weapon_node.craft:
                new_entry['craft'].append({
                    'type':
                    'Create',
                    **convert_recipe(item_text_handler, weapon_node.craft)
                })
            if weapon_node.upgrade:
                new_entry['craft'].append({
                    'type':
                    'Upgrade',
                    **convert_recipe(item_text_handler, weapon_node.upgrade)
                })

            new_weapon_map.insert(new_entry)

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "weapons/weapon_base.csv",
        new_weapon_map,
        schema=schema.WeaponBaseSchema(),
        translation_filename="weapons/weapon_base_translations.csv")

    writer.save_data_csv("weapons/weapon_sharpness.csv",
                         new_weapon_map,
                         key="sharpness",
                         schema=schema.WeaponSharpnessSchema())

    writer.save_data_csv("weapons/weapon_bow_ext.csv",
                         new_weapon_map,
                         key="bow",
                         schema=schema.WeaponBowSchema())

    writer.save_data_csv("weapons/weapon_craft.csv",
                         new_weapon_map,
                         key="craft",
                         schema=schema.WeaponCraftSchema())

    writer.save_keymap_csv("weapons/weapon_ammo.csv",
                           ammo_reader.data,
                           schema=schema.WeaponAmmoSchema())

    print("Weapon files updated\n")

    # Any item referenced by a recipe but missing from the item list is added.
    add_missing_items(item_text_handler.encountered, mhdata=mhdata)
コード例 #29
0
def update_items(item_updater: ItemUpdater, *, mhdata=None):
    """Rebuild item_base.csv from in-game item data in three passes.

    Pass 1 merges in-game items with existing entries (categorizing them),
    pass 2 re-adds old entries missing from the new data, and pass 3
    reorders everything by category. Also writes artifacts listing
    unlinked item names and all item ids.
    """
    if not mhdata:
        mhdata = load_data()
        print("Existing Data loaded. Using to expand item list")

    new_item_map = DataMap(languages='en', start_id=mhdata.item_map.max_id + 1)
    unlinked_item_names = OrderedSet()

    # used to track dupes to throw proper errors
    updated_names = set()

    # First pass. Iterate over existing ingame items and merge with existing data
    for entry in item_updater.item_data:
        name_dict, description_dict = item_updater.name_and_description_for(
            entry.id, track=False)
        existing_item = mhdata.item_map.entry_of('en', name_dict['en'])

        # Items neither encountered in-game nor already known are only
        # recorded in the unlinked-names artifact.
        is_encountered = entry.id in item_updater.encountered_item_ids
        if not is_encountered and not existing_item:
            unlinked_item_names.add(name_dict['en'])
            continue

        if name_dict['en'] in updated_names:
            raise Exception(f"Duplicate item {name_dict['en']}")
        updated_names.add(name_dict['en'])

        # note: we omit buy price as items may have a buy price even if not sold.
        # We only care about the buy price of BUYABLE items
        new_data = {
            'name': name_dict,
            'description': description_dict,
            'rarity': entry.rarity + 1,
            'sell_price': None,
            'points': None
        }

        # Flags that drive category/subcategory selection below.
        is_ez = entry.flags.ez
        is_account = entry.type == 'endemic'
        is_tradein = "(Trade-in Item)" in description_dict['en']
        is_appraisal = entry.flags.appraisal

        # Account (endemic) items award points; everything else sells.
        sell_value = entry.sell_price if entry.sell_price != 0 else None
        if is_account:
            new_data['points'] = sell_value
        else:
            new_data['sell_price'] = sell_value

        if name_dict['en'] == 'Normal Ammo 1':
            new_data['category'] = 'hidden'
        elif is_ez:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'supply'
        elif is_account:
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'trade' if is_tradein else 'account'
        elif is_appraisal or ('Appraised after investigation'
                              in description_dict['en']):
            new_data['category'] = 'misc'
            new_data['subcategory'] = 'appraisal'
            new_data['sell_price'] = None  # why does this have values?
        else:
            new_data['category'] = entry.type
            new_data['subcategory'] = 'trade' if is_tradein else None

            # Whether we show carry limit at all is based on item type.
            # Materials are basically infinite carry
            infinite_carry = new_data['category'] == 'material'
            new_data[
                'carry_limit'] = None if infinite_carry else entry.carry_limit

        if existing_item:
            new_item_map.insert({**existing_item, **new_data})
        else:
            new_item_map.insert(new_data)

    # Second pass, add old entries that are not in the new one
    for old_entry in mhdata.item_map.values():
        if old_entry.name('en') not in new_item_map.names('en'):
            new_item_map.insert(old_entry)

    # Third pass. Items need to be reordered based on type

    unsorted_item_map = new_item_map  # store reference to former map

    def filter_category(category, subcategory=None):
        "helper that returns items and then removes from unsorted item map"
        results = []
        for item in unsorted_item_map.values():
            if item['category'] == category and item[
                    'subcategory'] == subcategory:
                results.append(item)
        # delete after collecting — can't mutate the map while iterating it
        for result in results:
            del unsorted_item_map[result.id]
        return results

    # 'Normal Ammo 1' is pulled out separately to pin its position below.
    normal_ammo_1 = unsorted_item_map.entry_of("en", "Normal Ammo 1")

    # start the before-mentioned third pass by creating a new map based off the old one
    new_item_map = DataMap(languages="en")
    new_item_map.extend(filter_category('item'))
    new_item_map.extend(filter_category('material'))
    new_item_map.extend(filter_category('material', 'trade'))
    if normal_ammo_1:
        new_item_map.insert(normal_ammo_1)
    new_item_map.extend(filter_category('ammo'))
    new_item_map.extend(filter_category('misc', 'appraisal'))
    new_item_map.extend(filter_category('misc', 'account'))
    new_item_map.extend(filter_category('misc', 'supply'))

    # Write out data
    writer = create_writer()

    writer.save_base_map_csv(
        "items/item_base.csv",
        new_item_map,
        schema=schema.ItemSchema(),
        translation_filename="items/item_base_translations.csv",
        translation_extra=['description'])

    # Write out artifact data
    print("Writing unlinked item names to artifacts")
    artifacts.write_names_artifact('items_unlinked.txt', unlinked_item_names)
    print("Writing all items and ids")
    artifact_data = [{
        'id': i.id,
        'name': i.name['en']
    } for i in item_updater.data]
    artifacts.write_dicts_artifact('items_ids.csv', artifact_data)

    print("Item files updated")
コード例 #30
0
ファイル: test_DataMap.py プロジェクト: renaiku/MHWorldData
def test_uses_provided_id():
    """insert() honors an explicit 'id' field instead of generating one."""
    datamap = DataMap()
    datamap.insert({'id': 3, **create_test_entry_en("test1")})

    assert 3 in datamap.keys(), "entry should have used id 3"