Example 1
def main():
    start = time()
    iron_horse.main()
    print(iron_horse.vacant_numeric_ids_formatted())

    generated_nml_path = os.path.join(generated_files_path, 'nml')
    if not os.path.exists(generated_nml_path):
        # reminder to self: kept inside main() to avoid modifying the filesystem simply by importing the module
        os.mkdir(generated_nml_path)
    grf_nml = codecs.open(os.path.join(generated_files_path, 'iron-horse.nml'),
                          'w', 'utf8')

    consists = iron_horse.get_consists_in_buy_menu_order()

    header_items = [
        'header', 'cargo_table', 'railtype_table', 'spriteset_templates',
        'tail_lights', 'haulage_bonus', 'intermodal_containers'
    ]
    for header_item in header_items:
        grf_nml.write(render_header_item_nml(header_item, consists))

    # multiprocessing was tried here and removed, as it was empirically slower in testing (probably due to the overhead of starting extra Python processes)
    for consist in consists:
        grf_nml.write(render_consist_nml(consist))

    grf_nml.close()

    print(format((time() - start), '.2f') + 's')
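
The render_header_item_nml helper is not shown in this excerpt. A hypothetical sketch, assuming the same chameleon template-loader pattern this codebase uses for lang files (see Example 4); the template directory and the '.pynml' extension are assumptions for illustration:

import os
from chameleon import PageTemplateLoader

# hypothetical loader; the real template directory and extension may differ
templates = PageTemplateLoader(os.path.join("src", "templates"), format="text")


def render_header_item_nml(header_item, consists):
    # look up a pre-compiled template by name and render it to an nml fragment
    template = templates[header_item + ".pynml"]
    return template(consists=consists)
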
Example 2
def main():
    print("[RENDER GRAPHICS] render_graphics.py")
    start = time()
    iron_horse.main()
    # get args passed by makefile
    makefile_args = utils.get_makefile_args(sys)
    # default to no mp, makes debugging easier (mp fails to pickle errors correctly)
    num_pool_workers = makefile_args.get('num_pool_workers', 0)
    if num_pool_workers == 0:
        use_multiprocessing = False
        # just print, no need for a coloured echo_message
        print('Multiprocessing disabled: (PW=0)')
    else:
        use_multiprocessing = True
        # just print, no need for a coloured echo_message
        print('Multiprocessing enabled: (PW=' + str(num_pool_workers) + ')')

    graphics_input_path = os.path.join(currentdir, 'src', 'graphics')
    graphics_output_path = os.path.join(iron_horse.generated_files_path,
                                        'graphics')
    if not os.path.exists(graphics_output_path):
        os.mkdir(graphics_output_path)

    hint_file = codecs.open(
        os.path.join(graphics_output_path,
                     '_graphics_files_here_are_generated.txt'), 'w', 'utf8')
    hint_file.write(
        "Don't edit the graphics files here. They're generated by the build script.\nEdit sources in src/graphics."
    )
    hint_file.close()

    consists = iron_horse.get_consists_in_buy_menu_order()
    intermodal_container_gestalts = iron_horse.intermodal_containers.registered_container_gestalts

    if not use_multiprocessing:
        for consist in consists:
            run_consist_pipelines(consist)
        for intermodal_container_gestalt in intermodal_container_gestalts:
            run_intermodal_container_pipelines(intermodal_container_gestalt)
    else:
        # Would this go faster if the pipelines from each consist were placed in MP pool, not just the consist?
        # probably potato / potato tbh
        pool = Pool(processes=num_pool_workers)
        pool.map(run_consist_pipelines, consists)
        pool.map(run_intermodal_container_pipelines,
                 intermodal_container_gestalts)
        pool.close()
        pool.join()

    report_sprites_complete(consists)

    for dir_name in ['tail_lights']:
        target_path = os.path.join(graphics_input_path, dir_name)
        dest_path = os.path.join(graphics_output_path, dir_name)
        if os.path.exists(dest_path):
            shutil.rmtree(dest_path)
        shutil.copytree(target_path, dest_path)

    print(format((time() - start), '.2f') + 's')
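
utils.get_makefile_args is not shown in this excerpt. A plausible minimal sketch, assuming the makefile passes key=value pairs on the command line; the parsing here is an assumption, not the project's actual implementation:

def get_makefile_args(sys_module):
    # e.g. invoked as: python render_graphics.py num_pool_workers=4
    makefile_args = {}
    for arg in sys_module.argv[1:]:
        key, _, value = arg.partition("=")
        # num_pool_workers is compared against 0 above, so coerce digits to int
        makefile_args[key] = int(value) if value.isdigit() else value
    return makefile_args
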
Example 3
def main():
    start = time()
    iron_horse.main()
    print(iron_horse.vacant_numeric_ids_formatted())

    generated_nml_path = os.path.join(generated_files_path, "nml")
    if not os.path.exists(generated_nml_path):
        # reminder to self: kept inside main() to avoid modifying the filesystem simply by importing the module
        os.mkdir(generated_nml_path)
    grf_nml = codecs.open(
        os.path.join(generated_files_path, "iron-horse.nml"), "w", "utf8"
    )

    spritelayer_cargos = iron_horse.get_spritelayer_cargos()
    consists = iron_horse.get_consists_in_buy_menu_order()

    header_items = [
        "header",
        "cargo_table",
        "railtype_table",
        "spriteset_templates",
        "spritelayer_cargo_empty_ss",
        "tail_lights",
        "recolour_sprites",
        "procedures_alternative_var_41",
        "procedures_alternative_var_random_bits",
        "procedures_box_car_with_opening_doors",
        "procedures_capacity",
        "procedures_cargo_subtypes",
        "procedures_colour_randomisation_strategies",
        "procedures_consist_specific_liveries",
        "procedures_haulage_bonus",
        "procedures_opening_doors",
        "procedures_restaurant_cars",
        "procedures_rulesets",
        "procedures_visible_cargo",
    ]
    for header_item in header_items:
        grf_nml.write(render_header_item_nml(header_item, consists))

    # multiprocessing was tried here and removed, as it was empirically slower in testing (probably due to the overhead of starting extra Python processes)
    for spritelayercargo in spritelayer_cargos:
        grf_nml.write(render_item_nml(spritelayercargo))
    for consist in consists:
        grf_nml.write(render_item_nml(consist))

    grf_nml.close()

    print(format((time() - start), ".2f") + "s")
Example 4
def main():
    start = time()
    iron_horse.main()
    consists = iron_horse.get_consists_in_buy_menu_order()

    languages_with_generation = ('english', )
    for i in languages_with_generation:
        # compile strings to single lang file - english
        lang_template = templates[i + '.pylng']

        src_file = codecs.open(os.path.join(lang_src, i + '.lng'), 'r', 'utf8')
        dst_file = codecs.open(os.path.join(lang_dst, i + '.lng'), 'w', 'utf8')
        lang_content = src_file.read()
        lang_content = lang_content + \
            lang_template(consists=consists, makefile_args=makefile_args)
        dst_file.write(lang_content)
        dst_file.close()

    print(format((time() - start), '.2f') + 's')
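
The module-level names templates, lang_src and lang_dst sit outside this excerpt. A sketch of the kind of setup the function relies on, with all paths assumed for illustration:

import os
from chameleon import PageTemplateLoader

currentdir = os.curdir
# format="text" selects chameleon's text templates, which suit .lng output
templates = PageTemplateLoader(
    os.path.join(currentdir, "src", "templates"), format="text"
)
lang_src = os.path.join(currentdir, "src", "lang")
lang_dst = os.path.join(currentdir, "generated", "lang")
# templates['english.pylng'] is then a callable whose rendered output is
# appended to the hand-written english.lng, as in the function above
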
Example 5
def main():
    start = time()
    iron_horse.main()
    consists = iron_horse.get_consists_in_buy_menu_order()

    languages_with_generation = ("english", )
    for i in languages_with_generation:
        # compile strings to single lang file - english
        lang_template = templates[i + ".pylng"]

        src_file = codecs.open(os.path.join(lang_src, i + ".lng"), "r", "utf8")
        dst_file = codecs.open(os.path.join(lang_dst, i + ".lng"), "w", "utf8")
        lang_content = src_file.read()
        lang_content = lang_content + lang_template(
            consists=consists,
            makefile_args=makefile_args,
            git_info=git_info,
            utils=utils,
        )
        dst_file.write(lang_content)
        dst_file.close()

    print(format((time() - start), ".2f") + "s")
Example 6
def main():
    print("[RENDER DOCS] render_docs.py")
    start = time()
    iron_horse.main()

    # default to no mp, makes debugging easier (mp fails to pickle errors correctly)
    num_pool_workers = makefile_args.get('num_pool_workers', 0)
    if num_pool_workers == 0:
        use_multiprocessing = False
        # just print, no need for a coloured echo_message
        print('Multiprocessing disabled: (PW=0)')
    else:
        use_multiprocessing = True
        #logger = multiprocessing.log_to_stderr()
        #logger.setLevel(25)
        # just print, no need for a coloured echo_message
        print('Multiprocessing enabled: (PW=' + str(num_pool_workers) + ')')

    # setting up a cache for compiled chameleon templates can significantly speed up template rendering
    chameleon_cache_path = os.path.join(currentdir,
                                        global_constants.chameleon_cache_dir)
    if not os.path.exists(chameleon_cache_path):
        os.mkdir(chameleon_cache_path)
    os.environ['CHAMELEON_CACHE'] = chameleon_cache_path

    docs_output_path = os.path.join(currentdir, 'docs')
    if os.path.exists(docs_output_path):
        shutil.rmtree(docs_output_path)
    os.mkdir(docs_output_path)

    shutil.copy(os.path.join(docs_src, 'index.html'), docs_output_path)

    static_dir_src = os.path.join(docs_src, 'html', 'static')
    static_dir_dst = os.path.join(docs_output_path, 'html', 'static')
    shutil.copytree(static_dir_src, static_dir_dst)

    # import iron_horse inside main() as it's so slow to import, and should only be imported explicitly
    consists = iron_horse.get_consists_in_buy_menu_order()
    # default sort for docs is by intro date
    consists = sorted(consists, key=lambda consist: consist.intro_date)
    dates = sorted([i.intro_date for i in consists])
    metadata['dates'] = (dates[0], dates[-1])

    # render standard docs from a list
    html_docs = [
        'trains', 'tech_tree_table', 'code_reference', 'get_started',
        'translations'
    ]
    txt_docs = ['license', 'readme']
    markdown_docs = ['changelog']
    graph_docs = ['tech_tree_linkgraph']

    render_docs(html_docs, 'html', docs_output_path, iron_horse, consists)
    render_docs(txt_docs, 'txt', docs_output_path, iron_horse, consists)
    # just render the markdown docs twice to get txt and html versions, simples no?
    render_docs(markdown_docs, 'txt', docs_output_path, iron_horse, consists)
    render_docs(markdown_docs,
                'html',
                docs_output_path,
                iron_horse,
                consists,
                use_markdown=True)
    render_docs(graph_docs, 'dotall', docs_output_path, iron_horse, consists)

    # process images for use in docs
    # yes, I really did bother using a pool to save at best a couple of seconds, because FML :)
    slow_start = time()
    if not use_multiprocessing:
        for consist in consists:
            render_docs_images(consist)
    else:
        # Would this go faster if the pipelines from each consist were placed in MP pool, not just the consist?
        # probably potato / potato tbh
        pool = multiprocessing.Pool(processes=num_pool_workers)
        pool.map(render_docs_images, consists)
        pool.close()
        pool.join()
    print('render_docs_images', time() - slow_start)

    print(format((time() - start), '.2f') + 's')
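
A note on the cache: CHAMELEON_CACHE is read by the chameleon library itself when it is first imported, so the variable must be set before any template-loading module is imported. A minimal sketch with an assumed cache path:

import os

os.environ["CHAMELEON_CACHE"] = os.path.join(os.curdir, ".chameleon_cache")
os.makedirs(os.environ["CHAMELEON_CACHE"], exist_ok=True)

# imported after setting the variable, so chameleon picks up the cache dir;
# compiled templates persist there and are reused on subsequent runs
from chameleon import PageTemplateLoader

docs_templates = PageTemplateLoader(os.path.join("src", "docs_templates"), format="text")
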
Example 7
def main():
    print("[RENDER DOCS] render_docs.py")
    start = time()
    iron_horse.main()

    # default to no mp, makes debugging easier (mp fails to pickle errors correctly)
    num_pool_workers = makefile_args.get("num_pool_workers", 0)
    if num_pool_workers == 0:
        use_multiprocessing = False
        # just print, no need for a coloured echo_message
        print("Multiprocessing disabled: (PW=0)")
    else:
        use_multiprocessing = True
        # logger = multiprocessing.log_to_stderr()
        # logger.setLevel(25)
        # just print, no need for a coloured echo_message
        print("Multiprocessing enabled: (PW=" + str(num_pool_workers) + ")")

    # setting up a cache for compiled chameleon templates can significantly speed up template rendering
    chameleon_cache_path = os.path.join(currentdir,
                                        global_constants.chameleon_cache_dir)
    if not os.path.exists(chameleon_cache_path):
        os.mkdir(chameleon_cache_path)
    os.environ["CHAMELEON_CACHE"] = chameleon_cache_path

    docs_output_path = os.path.join(currentdir, "docs")
    if os.path.exists(docs_output_path):
        shutil.rmtree(docs_output_path)
    os.mkdir(docs_output_path)

    shutil.copy(os.path.join(docs_src, "index.html"), docs_output_path)

    static_dir_src = os.path.join(docs_src, "html", "static")
    static_dir_dst = os.path.join(docs_output_path, "html", "static")
    shutil.copytree(static_dir_src, static_dir_dst)

    # import iron_horse inside main() as it's so slow to import, and should only be imported explicitly
    consists = iron_horse.get_consists_in_buy_menu_order()
    # default sort for docs is by intro date
    consists = sorted(consists, key=lambda consist: consist.intro_date)
    dates = sorted([i.intro_date for i in consists])
    metadata["dates"] = (dates[0], dates[-1])

    # render standard docs from a list
    html_docs = [
        "code_reference",
        "get_started",
        "translations",
        "tech_tree_table_blue",
        "tech_tree_table_red",
        "tech_tree_table_blue_simplified",
        "tech_tree_table_red_simplified",
        "train_whack",
        "trains",
    ]
    txt_docs = ["readme"]
    license_docs = ["license"]
    markdown_docs = ["changelog"]
    graph_docs = ["tech_tree_linkgraph"]

    render_docs(html_docs, "html", docs_output_path, iron_horse, consists)
    render_docs(txt_docs, "txt", docs_output_path, iron_horse, consists)
    render_docs(
        license_docs,
        "txt",
        docs_output_path,
        iron_horse,
        consists,
        source_is_repo_root=True,
    )
    # just render the markdown docs twice to get txt and html versions, simples no?
    render_docs(markdown_docs, "txt", docs_output_path, iron_horse, consists)
    render_docs(markdown_docs,
                "html",
                docs_output_path,
                iron_horse,
                consists,
                use_markdown=True)
    render_docs(graph_docs, "dotall", docs_output_path, iron_horse, consists)

    # render vehicle details
    for roster in iron_horse.registered_rosters:
        for consist in roster.engine_consists:
            consist.assert_description_foamer_facts()
            render_docs_vehicle_details(consist, docs_output_path, consists)

    # process images for use in docs
    # yes, I really did bother using a pool to save at best a couple of seconds, because FML :)
    slow_start = time()
    if not use_multiprocessing:
        for consist in consists:
            render_docs_images(consist)
    else:
        # Would this go faster if the pipelines from each consist were placed in MP pool, not just the consist?
        # probably potato / potato tbh
        pool = multiprocessing.Pool(processes=num_pool_workers)
        pool.map(render_docs_images, consists)
        pool.close()
        pool.join()
    print("render_docs_images", time() - slow_start)

    print(format((time() - start), ".2f") + "s")
Example 8
def main():
    print("[RENDER GRAPHICS] render_graphics.py")
    start = time()
    iron_horse.main()
    # get args passed by makefile
    makefile_args = utils.get_makefile_args(sys)
    # default to no mp, makes debugging easier (mp fails to pickle errors correctly)
    num_pool_workers = makefile_args.get("num_pool_workers", 0)
    if num_pool_workers == 0:
        use_multiprocessing = False
        # just print, no need for a coloured echo_message
        print("Multiprocessing disabled: (PW=0)")
    else:
        use_multiprocessing = True
        # just print, no need for a coloured echo_message
        print("Multiprocessing enabled: (PW=" + str(num_pool_workers) + ")")

    graphics_input_path = os.path.join(currentdir, "src", "graphics")
    graphics_output_path = os.path.join(iron_horse.generated_files_path,
                                        "graphics")
    if not os.path.exists(graphics_output_path):
        os.mkdir(graphics_output_path)

    hint_file = codecs.open(
        os.path.join(graphics_output_path,
                     "_graphics_files_here_are_generated.txt"),
        "w",
        "utf8",
    )
    hint_file.write(
        "Don't edit the graphics files here. They're generated by the build script.\nEdit sources in src/graphics."
    )
    hint_file.close()

    consists = iron_horse.get_consists_in_buy_menu_order()

    # get a list of 2-tuple pairs for spritelayer cargos + cargo sets
    # a list format is wanted for convenience with the graphics multiprocessing pool
    # the parent spritelayer_cargo object must be passed along with the cargo set, as cargo sets have render-time properties that change according to context
    # cargo_sets are global and reused across spritelayer_cargos, so they can't just store a single reference to their spritelayer_cargo parent
    spritelayer_cargo_set_pairs = []
    for spritelayer_cargo in iron_horse.registered_spritelayer_cargos:
        for cargo_set in spritelayer_cargo.cargo_sets:
            spritelayer_cargo_set_pairs.append((spritelayer_cargo, cargo_set))

    if not use_multiprocessing:
        for spritelayer_cargo_set_pair in spritelayer_cargo_set_pairs:
            run_spritelayer_cargo_set_pipelines(spritelayer_cargo_set_pair)
        for consist in consists:
            run_consist_pipelines(consist)
    else:
        # Would this go faster if the pipelines from each consist were placed in MP pool, not just the consist?
        # probably potato / potato tbh
        pool = Pool(processes=num_pool_workers)
        pool.map(run_spritelayer_cargo_set_pipelines,
                 spritelayer_cargo_set_pairs)
        pool.close()
        pool.join()
        # wait for the first pool to finish before starting the second
        pool = Pool(processes=num_pool_workers)
        pool.map(run_consist_pipelines, consists)
        pool.close()
        pool.join()

    report_sprites_complete(consists)

    for dir_name in ["tail_lights"]:
        target_path = os.path.join(graphics_input_path, dir_name)
        dest_path = os.path.join(graphics_output_path, dir_name)
        if os.path.exists(dest_path):
            shutil.rmtree(dest_path)
        shutil.copytree(target_path, dest_path)

    print(format((time() - start), ".2f") + "s")
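
The two sequential pools above guarantee that all cargo set sprites exist before the consist pipelines run. Because Pool.map() blocks until every result is ready, the same ordering could be written with context managers (supported since Python 3.3), which tear the pool down automatically; a sketch, reusing the names from the function above:

from multiprocessing import Pool

with Pool(processes=num_pool_workers) as pool:
    pool.map(run_spritelayer_cargo_set_pipelines, spritelayer_cargo_set_pairs)
# map() has returned, so all cargo set work is complete at this point; note
# that the 'with' block calls terminate() on exit rather than close()/join(),
# which is safe here only because map() is blocking
with Pool(processes=num_pool_workers) as pool:
    pool.map(run_consist_pipelines, consists)
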
Example 9
import os.path
currentdir = os.curdir

import sys
sys.path.append('src')  # add src to the module search path
import shutil

import iron_horse
import global_constants
from PIL import Image, ImageDraw

consists = iron_horse.get_consists_in_buy_menu_order()
input_graphics_dir = os.path.join('src', 'graphics')
output_graphics_dir = os.path.join('src', 'graphics_migrated')
base_template_spritesheet = Image.open(
    os.path.join('graphics_sources', 'base_10_8_spritesheet.png'))
spriterow_height = 30
DOS_PALETTE = Image.open('palette_key.png').palette

col_insertion_points = ([60, 4], [76, 2], [107, 1], [156, 2], [188, 4],
                        [200, 2], [235, 1], [284, 2], [316, 1])


def get_legacy_bounding_boxes(y=0):
    return [[60, y, 8, 25], [76, y, 22, 22], [107, y, 32, 15],
            [156, y, 22, 22], [188, y, 8, 25], [204, y, 22, 22],
            [235, y, 32, 15], [284, y, 22, 22], [316, y, 64, 15]]
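
The bounding boxes above are [x, y, width, height] lists, whereas PIL's Image.crop() expects (left, upper, right, lower) 4-tuples. A hypothetical helper (not part of the original script) showing the conversion:

def crop_legacy_sprites(spritesheet, y=0):
    # convert each [x, y, width, height] box to (left, upper, right, lower)
    sprites = []
    for left, top, width, height in get_legacy_bounding_boxes(y):
        sprites.append(spritesheet.crop((left, top, left + width, top + height)))
    return sprites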


def recomp_legacy_spriterows(row_count, spriterow, migrated_spritesheet):
    migrated_spriterow = base_template_spritesheet.crop((0, 10, 400, 40))