Example #1
def test_manual_id_resets_sequence():
    datamap = DataMap()

    datamap.add_entry(25, create_test_entry_en('test1'))
    new_entry = datamap.insert(create_test_entry_en('test2'))

    assert new_entry.id > 25, "new id should have been higher"
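
The test above pins down DataMap's id sequencing: a manually supplied id advances the auto-id sequence past it. A minimal sketch of that rule with hypothetical internals (the real DataMap class is not shown in this listing):

class IdSequence:
    """Sketch of the sequencing rule test_manual_id_resets_sequence exercises."""
    def __init__(self):
        self.last_id = 0

    def register(self, manual_id):
        # add_entry(25, ...) pushes the sequence to at least 25
        self.last_id = max(self.last_id, manual_id)

    def next_id(self):
        # a later insert(...) then allocates 26, so new_entry.id > 25 holds
        self.last_id += 1
        return self.last_id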
Example #2
def test_can_lookup_id_by_name():
    map = DataMap()
    map.add_entry(1, create_test_entry_en("test1"))
    map.add_entry(2, create_test_entry_en("test2"))
    map.add_entry(3, create_test_entry_en("test3"))

    idval = map.id_of("en", "test2")
    assert idval == 2, "expected test2 to have id 2"
Example #3
def test_can_lookup_by_id():
    map = DataMap()
    map.add_entry(55, create_test_entry_en("test1"))
    map.add_entry(1, create_test_entry_en("test2"))
    map.add_entry(8, create_test_entry_en("test3"))

    found = map[1]  # note: id order is not sequential
    assert found.name('en') == "test2", "found name should match"
Example #4
def test_can_iterate_values_in_order():
    expected_names = ['test1', 'test2', 'test3']

    map = DataMap()
    for (id, name) in enumerate(expected_names):
        map.add_entry(id, create_test_entry_en(name))

    found = [entry['name']['en'] for entry in map.values()]
    assert found == expected_names, "Expected map entries to match"
Example #5
def test_save_base_symmetric(writer):
    data = DataMap()
    data.add_entry(1, create_entry_en('test1'))
    data.add_entry(2, create_entry_en('test2'))

    writer.save_base_map('testbase.json', data)
    new_data = writer.load_base_json('testbase.json', languages)

    assert dict(data) == dict(new_data), "saved data didn't match"
Example #6
def test_to_dict_correct_data():
    data = {
        25: create_test_entry_en('test1', { 'somedata': {'nested': 5}}),
        28: create_test_entry_en('test2', { 'somedata': {'alsonested': 'hey'}})
    }

    datamap = DataMap()
    datamap.add_entry(25, data[25])
    datamap.add_entry(28, data[28])

    serialized = datamap.to_dict()
    assert serialized == data, "expected serialized data to match original data"
Example #7
def test_can_iterate_items_in_order():
    expected_entries = [
        (1, create_test_entry_en('test1')),
        (2, create_test_entry_en("test2")),
        (3, create_test_entry_en("test3"))]
    
    map = DataMap()
    for (id, entry) in expected_entries:
        map.add_entry(id, entry)
    
    found = list(map.items())
    assert found == expected_entries, "Expected map entries to match"
Example #8
def transform_dmap(dmap: DataMap, obj_schema):
    """Returns a new datamap, 
    where the items in the original have run through the marshmallow schema."""
    results = DataMap()
    for entry_id, entry in dmap.items():
        data = entry.to_dict()
        (converted, errors) = obj_schema.load(data, many=False)  # marshmallow 2.x returns (data, errors)

        if errors:
            raise Exception(str(errors))

        results.add_entry(entry_id, converted)
    return results
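
A sketch of how transform_dmap might be used, assuming a marshmallow 2.x schema (whose load returns a (data, errors) tuple, as the function expects); the schema and map names here are illustrative, not taken from the function itself:

# Hypothetical wiring: raw_armor_map and ArmorBaseSchema stand in for real objects.
schema = ArmorBaseSchema()
validated_map = transform_dmap(raw_armor_map, schema)  # raises if any entry fails validation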
Example #9
def test_save_split_data_map_symmetric(writer):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, { **basedata[1], 'key': 'f1', 'data': 'test1'})
    extdata.add_entry(2, { **basedata[2], 'key': 'f2', 'data': 'test2'})

    writer.save_split_data_map('split', basedata, extdata, 'key')
    new_data = writer.load_split_data_map(basedata, 'split')

    assert extdata.to_dict() == new_data.to_dict(), "expected data to match"
Example #10
def test_save_data_json_symmetric(writer):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': 'test1'})
    extdata.add_entry(2, {**basedata[2], 'data': 'test2'})

    writer.save_data_json('testdatasym.json', extdata, fields=['data'])

    testdata = writer.load_data_json(basedata.copy(), 'testdatasym.json')

    assert extdata.to_dict() == testdata.to_dict(), "expected data to match"
Example #11
def test_set_value_after_item():
    test_keys = [ 'test1', 'test2', 'test3', 'test4']
    test_dict = { k:1 for k in test_keys }
    test_dict['name'] = { 'en': 'a test' } # required field

    datamap = DataMap()
    entry = datamap.add_entry(1, test_dict)

    entry.set_value('NEW', 1, after='test2')

    # note: name exists because it was manually added to test_dict
    expected_keys = ['test1', 'test2', 'NEW', 'test3', 'test4', 'name']
    entry_keys = list(entry.keys())
    assert entry_keys == expected_keys, "Expected new to be after test2"
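
The after= semantics amount to splicing the new key in immediately behind its anchor while preserving the rest of the order. A minimal sketch over a plain dict (hypothetical; it relies on Python 3.7+ insertion order, not on DataMap internals):

def set_value_after(d, key, value, after):
    # Rebuild the mapping, emitting (key, value) right after the anchor key
    items = []
    for k, v in d.items():
        items.append((k, v))
        if k == after:
            items.append((key, value))
    d.clear()
    d.update(items)

# set_value_after(test_dict, 'NEW', 1, after='test2') yields the order asserted above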
Example #12
def test_save_data_csv_symmetric_listmode(writer: DataReaderWriter):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': [{'data': 'test1'}]})
    extdata.add_entry(2, {
        **basedata[2], 'data': [{
            'data': 'test2'
        }, {
            'data': 'test2ext'
        }]
    })

    writer.save_data_csv('testdatasym.csv', extdata, key='data')
    new_data = writer.load_data_csv(basedata.copy(),
                                    'testdatasym.csv',
                                    key='data',
                                    leaftype="list")

    assert extdata.to_dict() == new_data.to_dict(), "expected data to match"
Example #13
def test_save_data_csv_symmetric_listmode(writer: DataReaderWriter):
    basedata = DataMap()
    basedata.add_entry(1, create_entry_en('test1'))
    basedata.add_entry(2, create_entry_en('test2'))

    extdata = DataMap()
    extdata.add_entry(1, {**basedata[1], 'data': [{'a': 'test1'}]})
    extdata.add_entry(2, {
        **basedata[2], 'data': [{
            'a': 'test2'
        }, {
            'a': 'test2ext'
        }]
    })

    writer.save_data_csv('testdatasym.csv', extdata, key='data')
    new_data = (DataStitcher(writer)
                .use_base(basedata.copy())
                .add_csv('testdatasym.csv', key='data')
                .get())

    assert extdata.to_dict() == new_data.to_dict(), "expected data to match"
Example #14
def test_nonmatching_id_throws():
    map = DataMap()
    with pytest.raises(ValueError):
        test_entry = create_test_entry_en("test1")
        map.add_entry(1, {**test_entry, 'id': 25})
Example #15
def update_armor():
    "Populates and updates armor information using the armorset_base as a source of truth"

    armor_text = load_text("common/text/steam/armor")
    armorset_text = load_text("common/text/steam/armor_series")

    # Parses binary armor data, mapped by the English name
    armor_data = {}
    for armor_entry in load_schema(am_dat.AmDat,
                                   "common/equip/armor.am_dat").entries:
        if armor_entry.gender == 0: continue
        if armor_entry.order == 0: continue
        name_en = armor_text[armor_entry.gmd_name_index]['en']
        armor_data[name_en] = armor_entry

    # Parses craft data, mapped by the binary armor id
    armor_craft_data = {}
    for craft_entry in load_schema(eq_crt.EqCrt,
                                   "common/equip/armor.eq_crt").entries:
        armor_craft_data[craft_entry.equip_id] = craft_entry

    # Get number of times armor can be upgraded by rarity level.
    # Unk7 is max level pre-augment, Unk8 is max post-augment
    # Thanks to the MHWorld Modders for the above info
    rarity_upgrades = {}
    for entry in load_schema(arm_up.ArmUp,
                             "common/equip/arm_upgrade.arm_up").entries:
        rarity_upgrades[entry.index + 1] = (entry.unk7 - 1, entry.unk8 - 1)

    print("Binary data loaded")

    mhdata = load_data()
    print("Existing data loaded. Using existing armorset data to drive new armor data.")

    # Will store results. Language lookup and validation will be in english
    new_armor_map = DataMap(languages="en")
    new_armorset_bonus_map = DataMap(languages="en")

    # Temporary storage for later processes
    all_set_skill_ids = OrderedSet()

    item_text_handler = ItemTextHandler()
    skill_text_handler = SkillTextHandler()

    print("Populating armor data, keyed by the armorset data")
    next_armor_id = mhdata.armor_map.max_id + 1
    for armorset in mhdata.armorset_map.values():
        # Handle armor pieces
        for part, armor_name in datafn.iter_armorset_pieces(armorset):
            existing_armor = mhdata.armor_map.entry_of('en', armor_name)
            armor_binary = armor_data.get(armor_name)

            if not armor_binary:
                raise Exception(
                    f"Failed to find binary armor data for {armor_name}")

            if armor_binary.set_skill1_lvl > 0:
                all_set_skill_ids.add(armor_binary.set_skill1)

            rarity = armor_binary.rarity + 1
            name_dict = armor_text[armor_binary.gmd_name_index]

            # Initial new armor data
            new_data = {
                'name': name_dict,  # Override for translation support!
                'rarity': rarity,
                'type': part,
                'gender': gender_list[armor_binary.gender],
                'slot_1': armor_binary.gem_slot1_lvl,
                'slot_2': armor_binary.gem_slot2_lvl,
                'slot_3': armor_binary.gem_slot3_lvl,
                'defense_base': armor_binary.defense,
                # Each upgrade level grants +2 defense (see rarity_upgrades above)
                'defense_max': armor_binary.defense + rarity_upgrades[rarity][0] * 2,
                'defense_augment_max': armor_binary.defense + rarity_upgrades[rarity][1] * 2,
                'defense_fire': armor_binary.fire_res,
                'defense_water': armor_binary.water_res,
                'defense_thunder': armor_binary.thunder_res,
                'defense_ice': armor_binary.ice_res,
                'defense_dragon': armor_binary.dragon_res,
                'skills': {},
                'craft': {}
            }

            # Add skills to new armor data
            for i in range(1, 2 + 1):
                skill_lvl = getattr(armor_binary, f"skill{i}_lvl")
                if skill_lvl != 0:
                    skill_id = getattr(armor_binary, f"skill{i}")
                    name_en = skill_text_handler.get_skilltree_name(
                        skill_id)['en']
                    new_data['skills'][f'skill{i}_name'] = name_en
                    new_data['skills'][f'skill{i}_pts'] = skill_lvl
                else:
                    new_data['skills'][f'skill{i}_name'] = None
                    new_data['skills'][f'skill{i}_pts'] = None

            # Add recipe to new armor data. Also track the encounter.
            recipe_binary = armor_craft_data[armor_binary.id]
            new_data['craft'] = convert_recipe(item_text_handler,
                                               recipe_binary)

            if not existing_armor:
                print(f"Entry for {armor_name} not in armor map, creating new entry")
                new_armor_map.add_entry(next_armor_id, new_data)
                next_armor_id += 1
            else:
                new_armor_map.add_entry(existing_armor.id, {
                    **existing_armor,
                    **new_data
                })

    # Process set skills. Since we don't yet understand the set -> skill mapping,
    # we only translate, pulling the established set skill name from the existing CSV.
    for bonus_entry in mhdata.armorset_bonus_map.values():
        skilltree = skill_text_handler.get_skilltree(bonus_entry.name('en'))
        name_dict = skill_text_handler.get_skilltree_name(skilltree.index)
        new_armorset_bonus_map.insert({**bonus_entry, 'name': name_dict})

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "armors/armor_base.csv",
        new_armor_map,
        schema=schema.ArmorBaseSchema(),
        translation_filename="armors/armor_base_translations.csv")

    writer.save_data_csv("armors/armor_skills_ext.csv",
                         new_armor_map,
                         key="skills")

    writer.save_data_csv("armors/armor_craft_ext.csv",
                         new_armor_map,
                         key="craft")

    writer.save_base_map_csv(
        "armors/armorset_bonus_base.csv",
        new_armorset_bonus_map,
        schema=schema.ArmorSetBonus(),
        translation_filename="armors/armorset_bonus_base_translations.csv")

    print("Armor files updated\n")

    add_missing_items(item_text_handler.encountered, mhdata=mhdata)
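
For reference, the defense fields in new_data encode a +2-defense-per-upgrade-level rule on top of the binary base defense. A worked example with hypothetical numbers:

# Hypothetical values, applying the formulas from new_data above
defense = 54
pre_augment_levels, post_augment_levels = 6, 10   # rarity_upgrades[rarity]
defense_max = defense + pre_augment_levels * 2           # 54 + 12 = 66
defense_augment_max = defense + post_augment_levels * 2  # 54 + 20 = 74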