# Fragment of a BCSV dump script (collapsed onto one line; edges are cut mid-construct,
# so the code is left byte-identical). It ensures the csv/json/html output directories
# exist, then for each known spec (specs.lookup: filename -> Row subclass) loads the
# .bcsv from in_path and starts emitting both a UTF-16LE CSV and an HTML table whose
# header row lists the Row class's field names. The trailing `for row in b.rows:` has
# its body in the next (unseen) chunk.
# NOTE(review): csv.writer(open(...)) never closes the file handle — should use a
# `with` block; also csv files should be opened with newline='' per the csv docs.
os.makedirs(csv_dir) if not os.path.exists(json_dir): os.makedirs(json_dir) if not os.path.exists(html_dir): os.makedirs(html_dir) for filename, row_class in specs.lookup.items(): html = [] html.append('<!DOCTYPE html>') html.append('<html>') html.append('<body>') objs = [] html.append('<h1>%s</h1>' % filename) b = bcsv.File(row_class) with open('%s/%s' % (in_path, filename), 'rb') as f: b.load(f.read()) csvw = csv.writer( open('%s/%s.csv' % (csv_dir, filename.replace('.bcsv', '')), 'w', encoding='utf-16le')) html.append('<table border=1>') html.append('<tr>') for field in row_class.fields(): html.append('<th>%s</th>' % field) csvw.writerow(row_class.fields()) html.append('</tr>') for row in b.rows:
# Fragment of a spec/code generator (collapsed onto one line; the leading statements
# belong to an unseen `if`, and the chunk ends mid-`try`, so the code is left
# byte-identical). First part: deduplicates enums that appear under multiple
# (filename, key) pairs — the first use is recorded in enum_uses, later identical
# uses are redirected through enum_remaps to one shared name. It then prints the
# shared enum definitions via print_enum, and finally walks every .bcsv file,
# printing a generated `class <name>(Row):` whose fields come from the parsed
# b.fields mapping (key -> (offset, size)); unknown hashes get a '_%08x' name,
# and 4-byte fields listed in the enums table are looked up in enum_remaps.
shared_enums.append(enum) shared_enum_names.append(name) enum_remaps[enum_uses[enumkey]] = name enum_remaps[(filename, key)] = enum_remaps[enum_uses[enumkey]] else: enum_uses[enumkey] = (filename, key) for name, enum in zip(shared_enum_names, shared_enums): print('%s = (' % name) print_enum('\t', enum) print(')') print() for filename in sorted(files): if filename.endswith('.bcsv'): b = bcsv.File(bcsv.Row) print('class %s(Row):' % (filename[:-5])) with open('%s/%s' % (path, filename), 'rb') as f: b.load(f.read()) it = b.fields.items() if '-sort' in sys.argv: it = sorted(it) for key, (offset, size) in it: try: name = preset_names[key] except KeyError: name = '_%08x' % key if filename in enums and str(key) in enums[filename]: assert (size == 4) try: remap = enum_remaps[(filename, key)]
# Fragment of a JSON name-table builder (collapsed onto one line; starts mid-loop and
# ends on a body-less `for row in f.rows:`, so the code is left byte-identical).
# First loop: fills output['villagerNames'] from MSBT labels of the form
# <3-char species><index> (e.g. label[:3] is the species code). Then, for each of
# three house-part name tables (wall/door/roof), it seeds output[json_key] from the
# matching BCSV (row.id -> row._39b5a93d) and overlays localized strings from the
# corresponding STR_*.msbt, keyed by the integer MSBT label. Finally begins the
# same pattern for FgMainParam (ground/flora table, presumably — continuation unseen).
# NOTE(review): bcsv files are opened with open(...).read() and never closed.
for label, index in m.labels.items(): a = species.index(label[:3]) b = int(label[3:]) output['villagerNames'][a][b] = fixup(m.strings[index]) # dual stuff for these justnames = ( ('STR_HouseWallName.msbt', 'houseWallNames', 'StructureHouseWallParam', StructureHouseWallParam), ('STR_HouseDoorName.msbt', 'houseDoorNames', 'StructureHouseDoorParam', StructureHouseDoorParam), ('STR_HouseRoofName.msbt', 'houseRoofNames', 'StructureHouseRoofParam', StructureHouseRoofParam), ) for msbt_name, json_key, fname, rowclass in justnames: output[json_key] = {} f = bcsv.File(rowclass) f.load(open(bcsv_path + '/' + fname + '.bcsv', 'rb').read()) for row in f.rows: output[json_key][row.id] = row._39b5a93d m = msbt.MSBT() m.load(msgArc.get_file_data(msbt_name)) for label, index in m.labels.items(): output[json_key][int(label)] = fixup(m.strings[index]) output['fg'] = {} f = bcsv.File(FgMainParam) f.load(open(bcsv_path + '/FgMainParam.bcsv', 'rb').read()) for row in f.rows:
# Setup for the island-map renderer: parse CLI paths and load the static game
# data tables (BCSV files) that describe terrain chunks, tile models, structures,
# facility models and bridges.
import bcsv
import json
import pbc
import sarc
import specs
import struct
import sys
import zstandard
from PIL import Image

romfs_path = sys.argv[1]      # root of the extracted romfs
savefile_path = sys.argv[2]   # path to the decrypted save data


def _load_bcsv(row_class, name):
    """Parse romfs_path/Bcsv/<name>.bcsv into a bcsv.File of row_class.

    Uses a context manager so the file handle is closed promptly (the
    original open(...).read() pattern leaked one handle per table).
    """
    table = bcsv.File(row_class)
    with open('%s/Bcsv/%s.bcsv' % (romfs_path, name), 'rb') as f:
        table.load(f.read())
    return table


chunk_defs = _load_bcsv(specs.FieldOutsideParts, 'FieldOutsideParts')
tile_defs = _load_bcsv(specs.FieldLandMakingUnitModelParam,
                       'FieldLandMakingUnitModelParam')
structure_info = _load_bcsv(specs.StructureInfoParam, 'StructureInfoParam')
facility_models = _load_bcsv(specs.StructureFacilityModel,
                             'StructureFacilityModel')
bridge_defs = _load_bcsv(specs.StructureBridgeParam, 'StructureBridgeParam')
# Fragment of the item-hints generator (collapsed onto one line; starts inside an
# unseen loop over MSBT file names, so the code is left byte-identical). First part:
# for each outfit-color MSBT, splits labels of the form <group>_<itemid> and
# accumulates (item_id, color-string) pairs per group in outfitGroup. For
# STR_OutfitGroupName files it then joins each group's base name with the per-item
# color to build item_names[item_id] = "<color> <name>". Finally it walks
# ItemParam.bcsv and writes item_hints.json mapping UniqueID -> "Label (hint)"
# (or just the Label when no hint was collected).
# NOTE(review): both open() calls here never close their handles; prefer `with`.
m = msbt.MSBT() m.load(msgArc.get_file_data(name)) for label, msg in m.data.items(): key = label[:label.rfind('_')] item_id = int(label[label.rfind('_') + 1:], 10) try: outfitGroup[key].append((item_id, fixup(msg))) except KeyError: outfitGroup[key] = [(item_id, fixup(msg))] if 'STR_OutfitGroupName' in name: m = msbt.MSBT() m.load(msgArc.get_file_data(name)) key2 = name[name.rfind('_') + 1:name.rfind('.')] for label, msg in m.data.items(): key1 = label name = fixup(msg) for iid, col in outfitGroup[f'{key1}_{key2}']: item_names[iid] = f'{col} {name}' output = {} f = bcsv.File(ItemParam) f.load(open(bcsv_path + '/ItemParam.bcsv', 'rb').read()) for row in f.rows: if row.UniqueID in item_names: output[row.UniqueID] = f'{row.Label} ({item_names[row.UniqueID]})' else: output[row.UniqueID] = row.Label json.dump(output, open('item_hints.json', 'w'), indent=4, sort_keys=True)
# Fragment of the mystery-tour island describer (collapsed onto one line; it opens
# mid-dict-literal — the dict's name and earlier keys are in an unseen chunk — so
# the code is left byte-identical). Visible entries 19-24 are hand-written English
# descriptions of rare tour islands, keyed by island id. After the dict it loads
# the MysteryTour* BCSV tables (tour, field, fish, insect parameters) from romfs.
# NOTE(review): tour_field_param is constructed twice (once loaded above, then
# re-created by the final statement) — the continuation is unseen, but this looks
# like a redundant reload worth confirming.
# NOTE(review): the open(...).read() calls never close their file handles.
'''"Curly River Island". There is one square cliff at the north east, a few flowers and rocks and a small amount of fruit trees. Only insects associated with water spawn here.''', 19: '''"Big Fish Island 2". This rare island only spawns big fish. Otherwise it is quite normal, with less flowers and a lower chance of appearing than the other big fish island.''', 20: '''"Trash Island". Everything you can fish here is trash. Only water-related insects spawn.''', 21: '''"Fins Island". A rectangular pond with rectangular cliffs inside, the tallest one being too tall to climb up to. The only fish that spawn here are the largest finned fish.''', 23: '''"Falls Island". Lots of cliffs, waterfalls and river forks. Normal resource spawns.''', 24: '''"Gold Island". A very rare island with flowers, scorpions and a rectangular pond. If you climb up onto the cliff and climb down from the back, you can vault over to a tiny island in the middle and get 8 gold nuggets from a single rock.''', } romfs_path = sys.argv[1] tour_param = bcsv.File(specs.MysteryTourParam) tour_param.load(open(romfs_path + '/Bcsv/MysteryTourParam.bcsv', 'rb').read()) tour_field_param = bcsv.File(specs.MysteryTourFieldParam) tour_field_param.load( open(romfs_path + '/Bcsv/MysteryTourFieldParam.bcsv', 'rb').read()) tour_fish_param = bcsv.File(specs.MysteryTourFishParam) tour_fish_param.load( open(romfs_path + '/Bcsv/MysteryTourFishParam.bcsv', 'rb').read()) tour_insect_param = bcsv.File(specs.MysteryTourInsectParam) tour_insect_param.load( open(romfs_path + '/Bcsv/MysteryTourInsectParam.bcsv', 'rb').read()) tour_field_param = bcsv.File(specs.MysteryTourFieldParam)
# Command-line query tool over ItemParam.bcsv. Modes are chosen by argv[1];
# the 'agg' mode (the part visible here) groups rows by an arbitrary field.
import bcsv
import specs
import sys

# Load the master item table once; every query mode below reads from it.
# Using a context manager so the file handle is closed promptly (the original
# open(...).read() pattern leaked it).
b = bcsv.File(specs.ItemParam)
with open('../ac120upd/romfs/Bcsv/ItemParam.bcsv', 'rb') as f:
    b.load(f.read())

# '-all' switches row printing from the short "id - label" form to a full field
# dump; it is removed from argv so the positional arguments stay aligned.
pall = False
if '-all' in sys.argv:
    sys.argv.remove('-all')
    pall = True


def prow(row):
    """Print one ItemParam row.

    With -all, dumps every field as 'name -> value'; otherwise prints the
    short 'UniqueID - Label' summary.
    """
    if pall:
        for fld in row.fields():
            print(f'{fld} -> {getattr(row,fld)}')
    else:
        print(f'{row.UniqueID} - {row.Label}')


mode = sys.argv[1]
if mode == 'agg':
    # Group all rows by the value of the field named in argv[2].
    key = sys.argv[2]
    agg = {}
    for row in b.rows:
        val = getattr(row, key)
        agg.setdefault(val, []).append(row)