Example #1
    def load_data_csv(self, parent_map: DataMap, data_file, *, key=None, groups=[], leaftype):
        """Loads a data file, using a base map to anchor it to id
        The parent_map is updated to map id -> data row.
        Returns the parent_map to support chaining.

        Language is automatically determined by the name of the first column.
        """
        
        data_file = self.get_data_path(data_file)

        if leaftype == 'list' and not key:
            raise ValueError("key is required if leaftype is list")
        
        rows = read_csv(data_file)

        if not rows:
            return parent_map

        # Auto detect language
        first_column = next(iter(rows[0].keys()))
        match = re.match('name_([a-zA-Z]+)', first_column)
        if not match:
            raise Exception("First column needs to be a name_{lang} column")
        
        lang = match.group(1)
        data = unflatten(rows, nest=[first_column], groups=groups, leaftype=leaftype)

        return parent_map.merge(data, lang=lang, key=key)
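A minimal usage sketch, assuming a reader object exposing this method, an already-loaded monster_map DataMap, and a hypothetical monsters/monster_weaknesses.csv whose first column is name_en (file and key names are illustrative, not from the source):

    reader.load_data_csv(
        monster_map,                         # existing base map, anchored by name_en
        'monsters/monster_weaknesses.csv',   # hypothetical data file
        key='weaknesses',                    # nest the new rows under this key
        leaftype='dict')                     # one weakness row per monster entry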
Example #2
    def load_base_csv(self, data_file, languages, groups=[],
                      translation_filename=None, translation_extra=[],
                      keys_ex=[], validate=True):
        """Loads a base data map object from a csv
        groups is a list of additional fields (name is automatically included)
        that nest via groupname_subfield.
        """
        data_file = self.get_data_path(data_file)
        groups = ['name'] + groups

        rows = [group_fields(row, groups=groups) for row in read_csv(data_file)]

        basemap = DataMap(languages=languages, keys_ex=keys_ex)
        basemap.extend(rows)

        if translation_filename:
            try:
                translations = fix_id(self.load_list_csv(translation_filename))
                groups = set(['name'] + translation_extra)
                merge_list(basemap, translations, groups=groups, many=False)
            except FileNotFoundError:
                print(f"Warning: Could not find translation file {translation_filename}")

        if languages:
            self._validate_base_map(data_file, basemap, languages, error=validate)

        return basemap
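A rough illustration of the grouping step, inferred from the docstring (the row values are made up): columns sharing a group prefix are folded into one nested dict before the rows go into the DataMap:

    row = {'name_en': 'Rathalos', 'name_ja': 'リオレウス', 'size': 'large'}
    # group_fields(row, groups=['name']) would return something like:
    # {'name': {'en': 'Rathalos', 'ja': 'リオレウス'}, 'size': 'large'}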
Example #3
def update_monsters(mhdata):
    monster_keys = read_csv(dirname(abspath(__file__)) + '/monster_map.csv')
    monster_keys = dict((r['name_en'], r) for r in monster_keys)

    monster_name_text = load_text('common/text/em_names')
    monster_info_text = load_text('common/text/em_info')
    for monster_entry in mhdata.monster_map.values():
        name_en = monster_entry.name('en')
        if name_en not in monster_keys:
            print(f'Warning: {name_en} not mapped, skipping')
            continue

        monster_key_entry = monster_keys[name_en]
        key = monster_key_entry['key']
        info_key = monster_key_entry['key_info_override'] or key

        monster_entry['name'] = monster_name_text[key]
        if info_key != 'NONE':
            monster_entry['description'] = monster_info_text[f'NOTE_{info_key}_DESC']

    # Write new data
    writer = create_writer()

    writer.save_base_map_csv(
        "monsters/monster_base.csv",
        mhdata.monster_map,
        schema=schema.MonsterBaseSchema(),
        translation_filename="monsters/monster_base_translations.csv",
        translation_extra=['description']
    )

    print("Monsters updated\n")
Example #4
    def __init__(self):
        id_alt_keys = ['id_alt', 'id_alt2']

        this_dir = dirname(abspath(__file__))

        # Load data from the quest data dump project
        # Note that since the key is a FILEPATH it can't be joined with the rest of the data
        with open(this_dir + '/metadata_files/MonsterData.json') as f:
            self.monster_data_ext = json.load(f)

        monster_keys_csv = read_csv(this_dir +
                                    '/metadata_files/monster_map.csv')
        monster_entries = [
            MonsterMetaEntry(
                name=r['name_en'].strip(),
                id=int(r['id'], 16),
                id_alt=[int(r[key], 16) for key in id_alt_keys if r[key]],
                key_name=r['key_name'],
                key_description=r['key_description']) for r in monster_keys_csv
        ]

        self._map = {r.name: r for r in monster_entries}
        self._map_by_id = {r.id: r for r in monster_entries}

        # Add alt keys. Note that they only go one way and cannot be reverse associated
        for r in monster_entries:
            for alt_id in r.id_alt:
                self._map_by_id[alt_id] = r
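The id_alt comprehension above skips blank cells while parsing hex ids; a self-contained illustration with made-up row values:

    id_alt_keys = ['id_alt', 'id_alt2']
    row = {'id_alt': '1A', 'id_alt2': ''}   # second alt column left blank in the csv
    id_alt = [int(row[k], 16) for k in id_alt_keys if row[k]]
    assert id_alt == [26]                   # hex '1A' -> 26; the empty cell is skipped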
Example #5
def load_area_map():
    this_dir = dirname(abspath(__file__))
    area_map = {
        int(r['id'], 16): r['name']
        for r in read_csv(this_dir + '/metadata_files/area_map.csv')
    }
    return area_map
Example #6
    def load_list_csv(self, data_file, *, schema=None):
        """Loads a simple csv without processing. 
        Accepts marshmallow schema to transform and validate it"""
        data_file = self.get_data_path(data_file)
        data = read_csv(data_file)

        if schema:
            # When marshmallow 3 is released, this API will change:
            # load will just return converted data and errors will auto-raise
            (converted, errors) = schema.load(data, many=True)
            if errors:
                raise Exception(str(errors))
            data = converted

        return data
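A sketch of the marshmallow 2.x contract this method relies on; the schema and field names are illustrative, not from the source:

    from marshmallow import Schema, fields

    class ItemRowSchema(Schema):
        name = fields.Str(required=True)
        rarity = fields.Int()

    rows = [{'name': 'Potion', 'rarity': '1'}]
    converted, errors = ItemRowSchema().load(rows, many=True)
    # Under marshmallow 2.x, load returns a (data, errors) pair and rarity is cast to int.
    # Under marshmallow 3.x, load returns only the data and raises ValidationError instead.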
Example #7
def update_all():
    "Updates all supported entity types using merged chunk data from ingame binaries."
    from .items import ItemUpdater
    from mhdata.load import load_data
    from mhdata.merge.binary.load import MonsterMetadata

    from .armor import update_armor
    from .weapons import update_weapons, update_weapon_songs, update_kinsects
    from .monsters import update_monsters
    from .quests import update_quests

    from mhdata.io.csv import read_csv
    from os.path import dirname, abspath

    mhdata = load_data()
    print("Existing Data loaded. Using it as a base to merge new data")

    this_dir = dirname(abspath(__file__))
    area_map = {
        int(r['id']): r['name']
        for r in read_csv(this_dir + '/area_map.csv')
    }
    print("Area Map Loaded")

    # validate area map
    error = False
    for name in area_map.values():
        if name not in mhdata.location_map.names('en'):
            print(f"Error: Area map has invalid location name {name}.")
            error = True
    if error:
        return
    print("Area Map validated")

    item_updater = ItemUpdater()
    monster_meta = MonsterMetadata()

    print()  # newline

    update_armor(mhdata, item_updater)
    update_weapons(mhdata, item_updater)
    update_weapon_songs(mhdata)
    update_kinsects(mhdata, item_updater)
    update_monsters(mhdata, item_updater, monster_meta)
    update_quests(mhdata, item_updater, monster_meta, area_map)

    # Now finalize the item updates from parsing the rest of the data
    item_updater.update_items()
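The area-map check above uses a collect-then-abort pattern: it reports every invalid name before bailing out, instead of stopping at the first. A self-contained illustration with made-up names:

    valid_names = {'Ancient Forest', 'Wildspire Waste'}
    area_map = {1: 'Ancient Forest', 2: 'Not A Real Area'}

    error = False
    for name in area_map.values():
        if name not in valid_names:
            print(f"Error: Area map has invalid location name {name}.")
            error = True
    if error:
        raise SystemExit(1)   # update_all simply returns instead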
Example #8
    def load_base_csv(self, data_file, groups=[], validate=True):
        """Loads a base data map object from a csv
        groups is a list of additional fields (name is automatically included)
        that nest via groupname_subfield.
        """
        data_file = self.get_data_path(data_file)
        groups = ['name'] + groups

        rows = read_csv(data_file)
        rows = [group_fields(row, groups=groups) for row in rows]

        basemap = DataMap()
        basemap.extend(rows)
        self._validate_base_map(data_file, basemap, error=validate)

        return basemap
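A usage sketch, assuming a reader object exposing this method and a hypothetical csv whose description_* columns should be folded together:

    basemap = reader.load_base_csv('items/item_base.csv', groups=['description'])
    # Columns such as description_en and description_ja are grouped into a single
    # nested 'description' dict on each row before the DataMap is built.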
Example #9
    def load_data_csv(self,
                      parent_map: DataMap,
                      data_file,
                      *,
                      key=None,
                      groups=[],
                      leaftype):
        """Loads a data file, using a base map to anchor it to id
        The parent_map is updated to map id -> data row.
        Returns the parent_map to support chaining.

        :param key: The dictionary key name in the base map to add the new data under.
                    If None, it will extend the current list.
        :param groups: Additional fields in the data map to group together into one field based on suffix.
        :param leaftype: Either list or dict, deciding whether the new data should be stored as a list or a dict.
                         Use list if the data is one-to-many, or dict if it's one-to-one.

        Language is automatically determined by the name of the first column.
        """

        data_file = self.get_data_path(data_file)

        if leaftype == 'list' and not key:
            raise ValueError("key is required if leaftype is list")

        rows = read_csv(data_file)

        if not rows:
            return parent_map

        # Auto detect language
        first_column = next(iter(rows[0].keys()))
        match = re.match('(?:base_)?([a-zA-Z]+)(?:_([a-zA-Z]+))?',
                         first_column)
        if not match:
            raise Exception(
                "First column needs to be a base_{field} or base_{field}_{lang} column"
            )

        fieldname = match.group(1)
        lang = match.group(2)
        data = unflatten(rows,
                         nest=[first_column],
                         groups=groups,
                         leaftype=leaftype)

        return parent_map.merge(data, field=fieldname, lang=lang, key=key)
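A self-contained check of what the first-column regex captures for the two supported column shapes:

    import re

    pattern = '(?:base_)?([a-zA-Z]+)(?:_([a-zA-Z]+))?'

    match = re.match(pattern, 'base_name_en')
    assert match.group(1) == 'name'   # fieldname
    assert match.group(2) == 'en'     # lang

    match = re.match(pattern, 'base_id')
    assert match.group(1) == 'id'
    assert match.group(2) is None     # no suffix, so lang is passed as None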
Example #10
def extend_decoration_chances(decoration_map: DataMap):
    """Calculates the drop tables given the decoration map.

    Each decoration is part of a drop table (decided by rarity), and each feystone
    roll lands on one of the drop tables. Every decoration in that table then has
    an "equal" chance of being selected.

    Odds are listed here, with one typo (gleaming is actually glowing).
    https://docs.google.com/spreadsheets/d/1ysj6c2boC6GarFvMah34e6VviZeaoKB6QWovWLSGlsY/htmlview?usp=sharing&sle=true#
    """

    jewel_to_table_odds = {}
    droprates = read_csv(
        join(data_path, "decorations/decoration_droprates.csv"))
    for row in droprates:
        entries = {}
        for i in range(5, 14):
            entries[i] = int(row[str(i)] or '0')
        jewel_to_table_odds[row['feystone']] = entries

    # Calculate how many entries there are per drop table type
    table_counts = {table: 0 for table in range(5, 14)}
    for entry in decoration_map.values():
        table_counts[entry['rarity']] += 1

    # Create an odds map for each drop table level
    # This maps droptable -> feystone -> probability
    # This is necessary because all decorations are assigned to a droptable
    odds_map = {}
    for table in range(5, 14):
        odds_map[table] = {}
        for feystone, feystone_odds in jewel_to_table_odds.items():
            count = table_counts[table]
            if count == 0:
                continue
            value = Decimal(feystone_odds[table]) / Decimal(count)
            odds_map[table][feystone] = value.quantize(Decimal('1.00000'))

    # Assign the odds map for the drop table level to the decoration itself
    for entry in decoration_map.values():
        entry['chances'] = odds_map[entry['rarity']]
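A worked instance of the division above, with illustrative numbers: if a feystone has a 24% chance of landing on drop table 9 and that table holds 16 decorations, each decoration gets 24/16 = 1.5%:

    from decimal import Decimal

    value = Decimal(24) / Decimal(16)
    assert value.quantize(Decimal('1.00000')) == Decimal('1.50000')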
Example #11
    def __init__(self):
        id_alt_keys = ['id_alt', 'id_alt2']

        this_dir = dirname(abspath(__file__))
        monster_keys_csv = read_csv(this_dir + '/monster_map.csv')
        monster_entries = [
            MonsterMetaEntry(
                name=r['name_en'].strip(),
                id=int(r['id'], 16),
                id_alt=[int(r[key], 16) for key in id_alt_keys if r[key]],
                key_name=r['key_name'],
                key_description=r['key_description']) for r in monster_keys_csv
        ]

        self._map = {r.name: r for r in monster_entries}
        self._map_by_id = {r.id: r for r in monster_entries}

        # Add alt keys. Note that they only go one way and cannot be reverse associated
        for r in monster_entries:
            for alt_id in r.id_alt:
                self._map_by_id[alt_id] = r
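A sketch of the one-way alt-id mapping described in the comment; MonsterMetaEntry is not shown in this excerpt, so a namedtuple stands in:

    from collections import namedtuple

    Entry = namedtuple('Entry', 'name id id_alt')
    entries = [Entry('Foo', 0x01, []), Entry('Bar', 0x02, [0x86])]

    map_by_id = {e.id: e for e in entries}
    for e in entries:
        for alt in e.id_alt:
            map_by_id[alt] = e

    assert map_by_id[0x86] is map_by_id[0x02]   # alt id resolves to the same entry
    assert map_by_id[0x86].id == 0x02           # entry only remembers its primary id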