Code example #1
File: mainscript_text.py Project: kmeisthax/telefang
def asm(args):
    """Generate the ASM for the metatable and each section.

    This operation needs to be performed once, and again if tables are to be
    relocated. To relocate tables, add a third parameter for that table into
    the bank names file with its new flat address, regenerate the metatable
    ASM, then reassemble the ROM.

    Generated ASM will be printed to console. This portion of the script is
    intended to be piped into a file."""

    charmap = parse_charmap(args.charmap)
    banknames = parse_bank_names(args.banknames)
    banknames = extract_metatable_from_rom(args.rom, charmap, banknames, args)

    print('SECTION "MainScript Meta Table", ' +
          format_sectionaddr_rom(args.metatable_loc))

    for bank in banknames:
        print("dw " + bank["symbol"])
        print("db BANK(" + bank["symbol"] + ')')

    print('')

    for bank in banknames:
        print('SECTION "' + bank["symbol"] + ' Section", ' +
              format_sectionaddr_rom(flat(bank["basebank"], bank["baseaddr"])))
        print(bank["symbol"] + ':')
        print('\tINCBIN "' +
              os.path.join(args.output, bank["objname"]).replace("\\", "/") +
              '"')
        print(bank["symbol"] + '_END')
        print('')
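These snippets lean on a handful of helpers defined elsewhere in mainscript_text.py. The names below come straight from the calls above; the bodies are only a hedged sketch based on standard Game Boy banked addressing (each switchable ROM bank is 0x4000 bytes, mapped at 0x4000-0x7FFF), not the project's actual implementation.

import struct

PTR = struct.Struct("<H")    #assumed: 16-bit little-endian pointer (matches rom.read(2))
CHARA = struct.Struct("<B")  #assumed: one-byte character (matches rom.read(1))

def flat(bank, addr):
    #Assumed: collapse a (bank, banked address) pair into a flat ROM offset.
    return bank * 0x4000 + (addr % 0x4000)

def format_sectionaddr_rom(flataddr):
    #Assumed: format a flat offset as an RGBDS SECTION address.
    bank = flataddr // 0x4000
    if bank == 0:
        return 'ROM0[${0:x}]'.format(flataddr)
    return 'ROMX[${0:x}], BANK[${1:x}]'.format(flataddr % 0x4000 + 0x4000, bank)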
Code example #2
File: mainscript_text.py Project: kmeisthax/telefang
def update_data(args):
    charmap = parse_charmap(args.charmap)
    banknames = parse_bank_names(args.banknames)

    for h, bank in enumerate(banknames):
        #Wikitext to CSV conversion pass
        #At the end of this conversion, the wikitext will be deleted and a CSV
        #will have been created. Existing CSV file will be deleted, if any.
        try:
            with io.open(os.path.join(args.input, bank["legacy_filename"]),
                         'r',
                         encoding="utf-8") as bank_wikifile:
                rows, hdrs = parse_wikitext(bank_wikifile)

                with open(os.path.join(args.output, bank["filename"]),
                          "w",
                          encoding="utf-8",
                          newline='') as bank_csvfile:
                    csvwriter = csv.writer(bank_csvfile)

                    #The file is open in text mode with an explicit encoding,
                    #so rows must be written as str; encoding cells to bytes
                    #here would make csv.writer emit literal b'...' strings.
                    csvwriter.writerow(hdrs)

                    for row in rows:
                        csvwriter.writerow(row)

            #The wikitext was read from args.input above, so delete it there.
            os.remove(os.path.join(args.input, bank["legacy_filename"]))
        except IOError:
            pass
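Every entry point here takes a single `args` namespace, presumably built by `argparse`. A minimal hypothetical wiring for the attributes `update_data` touches (all flag names and defaults below are made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--charmap", default="charmap.txt")      #hypothetical flag
parser.add_argument("--banknames", default="banknames.txt")  #names and defaults
parser.add_argument("--input", default="script")
parser.add_argument("--output", default="build")

update_data(parser.parse_args())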
Code example #3
def make_maps(args):
    """Compile the stated (or all) tilemaps into .tmap files suitable for
    inclusion within the compressed tilemap banks."""

    charmap = parse_charmap(args.charmap)

    #We Are Number One but a python script compiles everything
    #YouTube: Our Cancer Has Cancer.
    for i, filename in enumerate(args.filenames):
        objname = os.path.join(args.output,
                               os.path.splitext(filename)[0] + ".tmap")

        with open(filename, "r") as csvfile:
            csvreader = csv.reader(csvfile)
            csv_data = []

            for row in csvreader:
                nrow = []
                for cell in row:
                    try:
                        nrow.append(int(cell, 10))
                    except ValueError:
                        pass

                csv_data.append(nrow)

            print("Compiling " + filename)

            with open(objname, "wb") as objfile:
                objfile.write(encode_tilemap(csv_data))
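As a usage sketch (paths and data invented; parse_charmap and encode_tilemap come from the surrounding module), a two-row tilemap CSV compiles like this:

import csv, os

os.makedirs("build", exist_ok=True)
with open("title_screen.csv", "w", newline='') as f:
    #Each cell is a decimal tile index; non-numeric cells are silently
    #dropped by the int() loop above.
    csv.writer(f).writerows([[1, 2, 3], [4, 5, 6]])

class FakeArgs:
    charmap = "charmap.txt"          #hypothetical paths
    filenames = ["title_screen.csv"]
    output = "build"

make_maps(FakeArgs())  #writes build/title_screen.tmap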
Code example #4
def asm(args):
    """Generate the ASM for string tables.

    This operation needs to be performed once, and again if tables are to be
    relocated.

    Generated ASM will be printed to console. This portion of the script is
    intended to be piped into a file."""

    charmap = parse_charmap(args.charmap)
    tablenames = parse_tablenames(args.tablenames)

    for table in tablenames:
        print(
            'SECTION "' + table["symbol"] + ' Section", ' +
            format_sectionaddr_rom(flat(table["basebank"], table["baseaddr"])))
        print(table["symbol"] + '::')
        print('\tINCBIN "' +
              os.path.join(args.output, table["objname"]).replace("\\", "/") +
              '"')
        print(table["symbol"] + '_END')
        print('')
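For reference, each iteration of the loop above prints one block of RGBDS ASM shaped like the following (symbol, bank and path are invented for illustration):

SECTION "DialogText Section", ROMX[$4000], BANK[$1c]
DialogText::
	INCBIN "build/dialog.str"
DialogText_END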
Code example #5
File: mainscript_text.py Project: kmeisthax/telefang
def wikisync(args):
    charmap = parse_charmap(args.charmap)
    banknames = parse_bank_names(args.banknames)

    for h, bank in enumerate(banknames):
        api_url = "http://wiki.telefang.net/api.php?action=query&titles=Wikifang:Telefang_1_Translation_Patch/Text_dump/{}&format=json&prop=revisions&rvprop=content".format(
            bank["wikiname"].strip())
        full_wikiname = "Wikifang:Telefang 1 Translation Patch/Text dump/{}".format(
            bank["wikiname"].strip())

        wikifile = urllib.request.urlopen(api_url)
        data = json.load(wikifile)

        for pageid in data["query"]["pages"]:
            if data["query"]["pages"][pageid]["title"] == full_wikiname:
                wikidir = os.path.join(args.input, bank["basedir"])
                wikipath = os.path.join(args.input, bank["filename"])

                install_path(wikidir)
                with open(wikipath, "w", encoding="utf-8") as bank_wikitext:
                    bank_wikitext.write(
                        data["query"]["pages"][pageid]["revisions"][0]["*"])
Code example #6
def make_tbl(args):
    charmap = parse_charmap(args.charmap)
    tablenames = parse_tablenames(args.tablenames)

    for table in tablenames:
        #Indexes are compiled in a second pass
        if table["format"] == "index":
            continue

        #If filenames are specified, only export banks that are mentioned there
        if len(args.filenames) > 0 and table["objname"] not in args.filenames:
            continue

        print("Compiling " + table["filename"])

        #Open and parse the data
        with open(os.path.join(args.input, table["filename"]),
                  "r",
                  encoding="utf-8") as csvfile:
            csvreader = csv.reader(csvfile)
            headers = None
            rows = []

            for row in csvreader:
                if headers is None:
                    headers = row
                else:
                    rows.append(row)

        #Determine what column we want
        index_col = headers.index("#")
        try:
            str_col = headers.index(args.language)
        except ValueError:
            str_col = index_col

        #Pack our strings
        packed_strings = []

        baseaddr = table["baseaddr"]

        entries = []
        reverse_entries = {}

        for i, row in enumerate(rows):
            if str_col >= len(row):
                print("WARNING: ROW {} IS MISSING IT'S TEXT!!!".format(i))
                packed_strings.append(b"")
                continue

            packed = pack_text(row[str_col],
                               specials,
                               charmap[0],
                               None,
                               args.window_width,
                               1,
                               memory_widths,
                               wrap=False,
                               do_not_terminate=True)

            if "stride" in table:
                if len(packed) > table["stride"]:
                    print(
                        "WARNING: Row {} is too long for the current string table stride of {} in table {}."
                        .format(i, table["stride"], table["filename"]))
                    packed = packed[0:table["stride"]]
                else:
                    #Pad the string out with E0s.
                    packed = packed + bytes([0xE0] *
                                            (table["stride"] - len(packed)))
            elif b"\xe0" not in packed:
                #Any data beyond an E0 is understood to be trash bytes; thus,
                #it does not receive the implicit terminator.
                packed = packed + b"\xe0"

            packed_strings.append(packed)

            reverse_entries[flat(table["basebank"], baseaddr)] = len(entries)
            entries.append(flat(table["basebank"], baseaddr))

            baseaddr += len(packed)

        #Save these for later
        table["entries"] = entries
        table["reverse_entries"] = reverse_entries

        #Write the data out to the object files. We're done here!
        if not os.path.exists(
                os.path.dirname(os.path.join(args.output, table["objname"]))):
            try:
                os.makedirs(
                    os.path.dirname(os.path.join(args.output,
                                                 table["objname"])))
            except FileExistsError:
                pass

        with open(os.path.join(args.output, table["objname"]),
                  "wb") as objfile:
            for line in packed_strings:
                objfile.write(line)

    for table in tablenames:
        #Now's the time to compile indexes
        if table["format"] != "index":
            continue

        #If filenames are specified, only export banks that are mentioned there
        if len(args.filenames) > 0 and table["objname"] not in args.filenames:
            continue

        print("Compiling " + table["filename"])

        foreign_ptrs = tablenames[table["foreign_id"]]["entries"]
        packed_strings = []

        entries = []
        reverse_entries = {}

        #Open and parse the data
        with open(os.path.join(args.input, table["filename"]),
                  "r",
                  encoding="utf-8") as csvfile:
            csvreader = csv.reader(csvfile)

            for row in csvreader:
                for cell in row:
                    packed_strings.append(
                        PTR.pack(foreign_ptrs[int(cell, 10) - 1] % 0x4000 +
                                 0x4000))

        #Write the data out to the object files. We're done here!
        with open(os.path.join(args.output, table["objname"]),
                  "wb") as objfile:
            for line in packed_strings:
                objfile.write(line)
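The `% 0x4000 + 0x4000` in the index pass converts a flat ROM offset (as stored in `entries` by the first pass) back into a banked pointer in the 0x4000-0x7FFF window. A worked example, assuming the little-endian 16-bit PTR struct sketched after code example #1:

#Flat offset 0x701a3 lives in bank 0x1c (0x701a3 // 0x4000 == 0x1c).
#Its banked pointer is 0x701a3 % 0x4000 + 0x4000 == 0x41a3.
PTR.pack(0x701a3 % 0x4000 + 0x4000)  #-> b'\xa3\x41'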
Code example #7
def extract(args):
    charmap = parse_charmap(args.charmap)
    tablenames = parse_tablenames(args.tablenames)

    with open(args.rom, 'rb') as rom:
        #Extract a list of pointers each index is expecting
        #This is used for trash byte detection later
        for table in tablenames:
            if table["format"] != "index":
                continue

            try:
                all_ptrs = tablenames[table["foreign_id"]]["expected_ptrs"]
            except KeyError:
                all_ptrs = []

            rom.seek(flat(table["basebank"], table["baseaddr"]))

            for i in range(table["count"]):
                ptr = PTR.unpack(rom.read(2))[0]
                addr = flat(table["basebank"], ptr)

                if addr not in all_ptrs:
                    all_ptrs.append(addr)

            all_ptrs.sort()
            tablenames[table["foreign_id"]]["expected_ptrs"] = all_ptrs

        for table in tablenames:
            #Indexes are extracted in a second pass
            if table["format"] == "index":
                continue

            entries = []
            reverse_entries = {}

            try:
                expected_ptrs = table["expected_ptrs"]
            except KeyError:
                expected_ptrs = None

            csvdir = os.path.join(args.input, table["basedir"])
            csvpath = os.path.join(args.input, table["filename"])
            install_path(csvdir)

            with open(csvpath, "w+", encoding="utf-8") as table_csvfile:
                csvwriter = csv.writer(table_csvfile)
                csvwriter.writerow(["#", args.language])

                if table["format"] == "table":
                    for i in range(table["count"]):
                        rom.seek(
                            flat(table["basebank"],
                                 table["baseaddr"] + i * table["stride"]))
                        reverse_entries[rom.tell()] = len(entries)
                        entries.append(rom.tell())
                        data = extract_string(rom, charmap, table["stride"],
                                              expected_ptrs)
                        #Text-mode CSV: write str, not bytes, or csv.writer
                        #would emit literal b'...' strings.
                        idx = "{0}".format(i + 1)
                        csvwriter.writerow([idx, data])
                elif table["format"] == "block":
                    rom.seek(flat(table["basebank"], table["baseaddr"]))
                    for i in range(table["count"]):
                        reverse_entries[rom.tell()] = len(entries)
                        entries.append(rom.tell())
                        data = extract_string(rom, charmap, None,
                                              expected_ptrs)
                        idx = "{0}".format(i + 1)
                        csvwriter.writerow([idx, data])

            #Save these for later
            table["entries"] = entries
            table["reverse_entries"] = reverse_entries

        #OK, now we can extract the indexes
        for table in tablenames:
            if table["format"] != "index":
                continue

            foreign_ptrs = tablenames[table["foreign_id"]]["reverse_entries"]
            rom.seek(flat(table["basebank"], table["baseaddr"]))

            csvdir = os.path.join(args.input, table["basedir"])
            csvpath = os.path.join(args.input, table["filename"])
            install_path(csvdir)

            with open(csvpath, "w+", encoding="utf-8") as table_csvfile:
                csvwriter = csv.writer(table_csvfile)

                pretty_row_length = math.ceil(math.sqrt(table["count"]))
                cur_row = []

                for i in range(table["count"]):
                    ptr = PTR.unpack(rom.read(2))[0]
                    addr = flat(table["basebank"], ptr)

                    cur_row.append("{0}".format(foreign_ptrs[addr] + 1))
                    if len(cur_row) >= pretty_row_length:
                        csvwriter.writerow(cur_row)
                        cur_row = []

                if len(cur_row) > 0:
                    csvwriter.writerow(cur_row)
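`pretty_row_length` keeps the index CSV roughly square instead of one enormous row. For example:

import math

math.ceil(math.sqrt(120))  #11, so a 120-entry index becomes ten rows of 11 plus a final row of 10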
Code example #8
def asm(args):
    """Generate an ASM file for the metatables and tables present within the
    compressed tilemap system.
    
    This process needs to be run at least once, to convert the compressed
    tilemap names into imports that the assembler and linker can use. Changes
    to that file can be mirrored into the ASM by re-running this command, or
    by editing the generated ASM by hand."""

    charmap = parse_charmap(args.charmap)
    datas, datas_index, banks, bank_index = parse_mapnames(args.mapnames)

    with open(args.rom, 'rb') as rom:
        #Extract the table so we know what order to reference data.
        metatable = extract_metatable(rom, args.metatable_length,
                                      args.metatable_loc)
        metatable_attribs = extract_metatable(rom, args.metatable_length,
                                              args.metatable_loc_attribs)

        bank_tables = {"tilemap": [], "attrib": []}
        bank_datas = {"tilemap": [], "attrib": []}

        for i, bank in enumerate(metatable):
            this_bank_data, this_bank_table = decompress_bank(
                rom, 0x4000 * bank)
            bank_tables["tilemap"].append(this_bank_table)
            bank_datas["tilemap"].append(this_bank_data)

        for i, bank in enumerate(metatable_attribs):
            this_bank_data, this_bank_table = decompress_bank(
                rom, 0x4000 * bank)
            bank_tables["attrib"].append(this_bank_table)
            bank_datas["attrib"].append(this_bank_data)

        #Generate ASM for the metatables
        for category_name, category_index in list(datas_index.items()):
            if category_name == "attrib":
                print('SECTION "' + category_name + ' Section", ' +
                      format_sectionaddr_rom(args.metatable_loc_attribs))
                print('RLEAttribmapBanks::')
            elif category_name == "tilemap":
                print('SECTION "' + category_name + ' Section", ' +
                      format_sectionaddr_rom(args.metatable_loc))
                print('RLETilemapBanks::')

            for bank_id, bank_table_index in list(category_index.items()):
                bank = banks[bank_index[category_name][bank_id]]
                print('\tdb BANK(' + bank["symbol"] + ')')

            print('')

        print('')

        #Generate ASM for each individual table.
        for category_name, category_index in list(datas_index.items()):
            for bank_id, _ in list(category_index.items()):
                bank = banks[bank_index[category_name][bank_id]]

                rom.seek(bank["flataddr"])

                print('SECTION "' + category_name +
                      ' Bank {0}'.format(bank_id) + '", ' +
                      format_sectionaddr_rom(bank["flataddr"]))
                print(bank["symbol"] + '::')

                #Print out the table in the order we extracted it from baserom
                for table_index in bank_tables[category_name][bank_id]:
                    #Detect if a table pointer refers to the end of the table
                    is_junk = False

                    if table_index >= len(datas_index[category_name][bank_id]):
                        data_meta = datas[datas_index[category_name][bank_id][
                            table_index - 1]]
                        print('\tdw ' + data_meta["symbol"] + "_END")
                    else:
                        data_meta = datas[datas_index[category_name][bank_id]
                                          [table_index]]
                        print('\tdw ' + data_meta["symbol"])

                print('')

                #Print out all the data blocks in the table.
                for tmap_id, _ in enumerate(
                        bank_datas[category_name][bank_id]):
                    #Prevent spitting out nonexistent data here
                    if tmap_id >= len(datas_index[category_name][bank_id]):
                        continue

                    data_meta = datas[datas_index[category_name][bank_id]
                                      [tmap_id]]

                    print(data_meta["symbol"] + '::')
                    print('\tincbin "' + os.path.join(
                        args.output, data_meta["objname"]).replace("\\", "/") +
                          '"')
                    print(data_meta["symbol"] + '_END')
                    print('')

            print('')
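extract_metatable itself is not shown in these examples. Given how it is called (a flat location, a length, and entries multiplied by 0x4000), a minimal sketch consistent with that usage would be, assuming one byte per bank number:

def extract_metatable(rom, length, loc):
    #Assumed: the metatable is a run of `length` one-byte bank numbers
    #starting at flat offset `loc`.
    rom.seek(loc)
    return list(rom.read(length))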
Code example #9
File: extract.py Project: kmeisthax/telefang
def extract(args):
    charmap = parse_charmap(args.charmap)
    banknames = parse_bank_names(args.banknames)
    banknames = extract_metatable_from_rom(args.rom, charmap, banknames, args)

    with open(args.rom, 'rb') as rom:
        for bank in banknames:
            wikitext = ["{|", "|-", "!Pointer", "!" + args.language]
            csvdata = [["Pointer", args.language]]

            rom.seek(flat(bank["basebank"], bank["baseaddr"]))

            addr = bank["baseaddr"]
            end = 0x8000

            #Autodetect the end/length of the table by finding the lowest
            #pointer that isn't stored after an existing pointer
            while addr < end:
                next_ptr = PTR.unpack(rom.read(2))[0]

                #Reject obviously invalid pointers
                if (next_ptr < addr or next_ptr > 0x7FFF):
                    break

                end = min(end, next_ptr)
                addr += 2

            tbl_length = (addr - bank["baseaddr"]) // 2

            #Actually extract our strings
            string = []

            #Stores the actual end of the last string, used for alias detection
            last_start = 0xFFFF
            last_end = 0xFFFF
            last_nonaliasing_row = -1

            #Also store if a redirected/overflowed row is being extracted
            redirected = False
            old_loc = None

            for i in range(tbl_length):
                csvrow = [
                    "0x{0:x}".format(
                        flat(bank["basebank"], bank["baseaddr"] + i * 2))
                ]
                wikitext.append("|-")
                wikitext.append("|0x{0:x}".format(
                    flat(bank["basebank"], bank["baseaddr"] + i * 2)))

                rom.seek(flat(bank["basebank"], bank["baseaddr"] + i * 2))
                read_ptr = PTR.unpack(rom.read(2))[0]

                #Attempt to autodetect "holes" in the text data.
                next_ptr = PTR.unpack(rom.read(2))[0]
                expected_length = next_ptr - read_ptr
                if i >= tbl_length - 1:
                    expected_length = -1  #no next pointer to bound the last row's length

                #Two different alias detects:

                #First, we try to see if this pointer matches another pointer
                #in the table.
                rom.seek(flat(bank["basebank"], bank["baseaddr"]))
                for j in range(i):
                    if read_ptr == PTR.unpack(rom.read(2))[0]:
                        #Aliased pointer!
                        csvrow.append("<ALIAS ROW 0x{0:x}>".format(j))
                        wikitext.append("|«ALIAS ROW 0x{0:x}»".format(j))
                        print(
                            "Pointer at 0x{0:x} fully aliases pointer 0x{1:x}".
                            format(
                                flat(bank["basebank"],
                                     bank["baseaddr"] + i * 2),
                                flat(bank["basebank"],
                                     bank["baseaddr"] + j * 2)))
                        break
                else:
                    #Second, we try to see if this pointer is in the middle of
                    #the last string.

                    #This alias detection breaks when the previous row uses the
                    #overflow code, so disable it if so.
                    if i > 0 and read_ptr < last_end - 1 and not redirected:
                        print(
                            "Pointer at 0x{0:x} partially aliases previous pointer"
                            .format(rom.tell() - 2))
                        csvrow.append(
                            "<ALIAS ROW 0x{0:x} INTO 0x{1:x}>".format(
                                last_nonaliasing_row, read_ptr - last_start))
                        wikitext.append(
                            "|«ALIAS ROW 0x{0:x} INTO 0x{1:x}»".format(
                                last_nonaliasing_row, read_ptr - last_start))
                        continue

                    read_length = 1
                    first_read = True
                    rom.seek(flat(bank["basebank"], read_ptr))

                    #Now we can initialize these...
                    redirected = False
                    old_loc = None

                    while (rom.tell() % 0x4000 < 0x3FFF or rom.tell() == flat(
                            bank["basebank"], bank["baseaddr"])):
                        next_chara = CHARA.unpack(rom.read(1))[0]
                        while (rom.tell() % 0x4000 < 0x3FFF or rom.tell() ==
                               flat(bank["basebank"], bank["baseaddr"])) and (
                                   read_length <= expected_length or first_read
                                   or redirected
                               ) and next_chara != 0xE0:  #E0 is end-of-string
                            if next_chara < 0xE0 and next_chara in charmap[
                                    1]:  #Control codes are the E0 block
                                string.append(charmap[1][next_chara])
                            elif next_chara in reverse_specials and specials[
                                    reverse_specials[next_chara]].redirect:
                                #Redirecting opcodes are transparently removed from the extracted text.
                                this_special = specials[
                                    reverse_specials[next_chara]]

                                if this_special.bts:
                                    read_length += this_special.bts
                                    fmt = "<" + ("", "B",
                                                 "H")[this_special.bts]
                                    word = struct.unpack(
                                        fmt, rom.read(this_special.bts))[0]

                                    if word < 0x4000 or word > 0x7FFF:
                                        #Overflowing into RAM is illegal - use the jump opcode.
                                        #Overflowing into ROM0 is technically not illegal, but
                                        #unorthodox enough that we're going to disallow it.
                                        string.append(
                                            format_literal(this_special.byte))
                                        string.append(
                                            format_literal(
                                                word & 0xFF, charmap[1]))
                                        string.append(
                                            format_literal(
                                                word >> 8, charmap[1]))
                                    else:
                                        #We need to do this right now to avoid breaking hole detection
                                        old_loc = rom.tell()
                                        read_length = rom.tell() - flat(
                                            bank["basebank"], read_ptr)

                                        rom.seek(flat(args.overflow_bank,
                                                      word))
                                        redirected = True
                                else:
                                    raise RuntimeError(
                                        "Invalid specials dictionary. Redirecting special character is missing bts."
                                    )
                            elif next_chara in reverse_specials:
                                #This must be the work of an 「ENEMY STAND」
                                this_special = specials[
                                    reverse_specials[next_chara]]

                                if this_special.bts:
                                    read_length += this_special.bts
                                    fmt = "<" + ("", "B",
                                                 "H")[this_special.bts]
                                    word = struct.unpack(
                                        fmt, rom.read(this_special.bts))[0]
                                    string.append(
                                        format_control_code(
                                            reverse_specials[next_chara],
                                            word))
                                else:
                                    string.append(
                                        format_control_code(
                                            reverse_specials[next_chara]))

                                if this_special.end:
                                    first_read = False
                                    break
                            #elif next_chara == 0xE2:
                            #Literal newline
                            #    string.append(u"\n")
                            else:
                                #Literal specials
                                string.append(format_literal(next_chara))

                            next_chara = CHARA.unpack(rom.read(1))[0]

                            #Explicitly stop updating read_length if the
                            #overflow opcode is used. Otherwise we'd think we
                            #read thousands or negative thousands of chars
                            if not redirected:
                                read_length = rom.tell() - flat(
                                    bank["basebank"], read_ptr)

                        #After the main extraction loop
                        if read_length >= expected_length:
                            break
                        else:
                            #Detect nulls (spaces) after the end of a string
                            #and append them to avoid creating a new pointer row
                            loc = rom.tell()
                            if redirected:
                                loc = old_loc

                            while CHARA.unpack(rom.read(1))[0] == charmap[0][
                                    " "] and read_length < expected_length:
                                string.append(" ")
                                loc += 1
                                read_length += 1

                            rom.seek(loc)  #cleanup

                            if read_length >= expected_length:
                                break
                            else:
                                #There's a hole in the ROM!
                                #Disassemble the next string.
                                print("Inaccessible data found at 0x{0:x}".
                                      format(flat(bank["basebank"], read_ptr)))

                                csvrow.append("".join(string))
                                wikitext.append("|" + "".join(string))
                                string = []

                                csvdata.append(csvrow)
                                csvrow = ["(No pointer)"]
                                wikitext.append("|-")
                                wikitext.append("|(No pointer)")

                                read_length += 1

                    csvrow.append("".join(string))
                    wikitext.append("|" + "".join(string))
                    string = []

                    #Store the actual end pointer for later use.
                    last_start = read_ptr
                    last_end = read_ptr + read_length
                    last_nonaliasing_row = i

                csvdata.append(csvrow)

            wikitext.append("|-")
            wikitext.append("|}")

            wikitext = "\n".join(wikitext)

            wikidir = os.path.join(args.input, bank["basedir"])
            wikipath = os.path.join(args.input, bank["legacy_filename"])
            csvpath = os.path.join(args.input, bank["filename"])

            install_path(wikidir)
            #with open(wikipath, "w+", encoding="utf-8") as bank_wikitext:
            #bank_wikitext.write(wikitext)

            with open(csvpath, "w+", encoding="utf-8") as bank_csvtext:
                csvwriter = csv.writer(bank_csvtext)

                for csvrow in csvdata:
                    csvwriter.writerow(csvrow)
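The end-of-table autodetection near the top of this function relies on the string data following the pointer table in the same bank: the smallest pointer seen so far is the earliest string, so the pointer walk must stop before reaching it. A self-contained rerun of that loop with invented values:

#Hypothetical pointer table at banked address 0x4000; the first string begins
#at 0x4008, so only four pointers can exist.
ptrs = iter([0x4008, 0x4030, 0x4051, 0x40a2])
addr, end = 0x4000, 0x8000
while addr < end:
    next_ptr = next(ptrs)
    if next_ptr < addr or next_ptr > 0x7FFF:
        break
    end = min(end, next_ptr)
    addr += 2
print((addr - 0x4000) // 2)  #4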
Code example #10
File: commands.py Project: kmeisthax/telefang
def make_tbl(args):
    """Compile the entire script into a single RGBDS file, and then write it to
    disk.
    
    `args` is the arguments passed to the program as processed by `argparse`."""
    charmap = parse_charmap(args.charmap)
    banknames = parse_bank_names(args.banknames)

    overflow_bank_ids = []

    if args.language == "Japanese":
        metrics = None
    else:
        metrics_file = open(args.metrics, 'r')
        metrics = parse_font_metrics(metrics_file)

    #Some CSV files in the patch branch are merged together.
    #We'll parse these first and add their row data to each individual bank...
    for filename in args.filenames:
        with open(filename, "r", encoding="utf-8") as csvfile:
            rowdata = parse_csv(csvfile, args.language)

        split_rowdata = omnibus_bank_split(rowdata, banknames)

        for bankid, rowdata in split_rowdata.items():
            banknames[bankid]["textdata"] = rowdata

    overflow_strings = {}
    bank_sources = {}

    for h, bank in enumerate(banknames):
        if bank["filename"].startswith("script/overflow") or bank[
                "filename"].startswith("script\\overflow"):
            #Don't attempt to compile the overflow bank. That's a separate pass
            overflow_bank_ids.append(h)
            continue

        #If filenames are specified, don't bother because we can't do on-demand
        #compilation anyway and the makefile gets the path wrong

        print("Compiling " + bank["filename"])
        #Open and parse the data
        if "textdata" not in bank.keys():
            with open(os.path.join(args.input, bank["filename"]),
                      "r",
                      encoding='utf-8') as csvfile:
                bank["textdata"] = parse_csv(csvfile, args.language)

        bank_window_width = args.window_width
        if "window_width" in list(bank.keys()):
            bank_window_width = bank["window_width"]

        srcdata, overflow = generate_table_section(bank, bank["textdata"],
                                                   charmap, metrics,
                                                   bank_window_width)
        bank_sources[bank["objname"]] = srcdata
        overflow_strings.update(overflow)

    number_symbols_exported = 0
    overflow_sources = {}

    for overflow_bank_id in overflow_bank_ids:
        overflow_src = format_section("Overflow Bank %d" % overflow_bank_id,
                                      flat(args.overflow_bank, 0x4000))
        overflow_offset = 0

        current_symbol_id = 0
        break_after_adding = False
        for symname, packed_str in overflow_strings.items():
            if current_symbol_id < number_symbols_exported:
                current_symbol_id += 1
                continue

            if overflow_offset + len(packed_str) > 0x4000:
                number_symbols_exported = current_symbol_id
                break

            current_symbol_id += 1

            overflow_src += format_symbol(symname, True)
            overflow_src += format_directives(packed_str)
            overflow_offset += len(packed_str)
        else:
            break_after_adding = True

        overflow_sources[overflow_bank_id] = overflow_src

        if break_after_adding:
            break

    with open(args.output_filename, "wb") as srcfile:
        for bank_id, bank_source in overflow_sources.items():
            srcfile.write(bank_source.encode("utf-8"))

        for bank_id, bank_source in bank_sources.items():
            srcfile.write(bank_source.encode("utf-8"))
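The overflow pass is a greedy packer: strings that did not fit in their home banks are appended to successive overflow banks, and `number_symbols_exported` remembers where the previous bank stopped so the next one resumes there. The core idea, reduced to a sketch with made-up sizes:

sizes = [0x1800, 0x1800, 0x1400]  #packed string lengths, invented
banks, used = [[]], 0
for size in sizes:
    if used + size > 0x4000:  #next string would cross the 16 KiB bank boundary
        banks.append([])
        used = 0
    banks[-1].append(size)
    used += size
print(len(banks))  #2 overflow banks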