Example 1
def main():
	moddirs = (g_emf_path,)
	parser = ck2parser.SimpleParser(*moddirs)
	loc = ck2parser.get_localisation(moddirs=moddirs)

	global g_traits
	g_traits = {}

	for _, tree in parser.parse_files('common/traits/*.txt'):
		for n, v in tree:
			if v.has_pair('leader', 'yes'):
				archetype = Trait.LEADER
			elif v.has_pair('personality', 'yes'):
				archetype = Trait.PERSONALITY
			elif v.has_pair('childhood', 'yes'):
				archetype = Trait.CHILDHOOD
			elif v.has_pair('lifestyle', 'yes'):
				archetype = Trait.LIFESTYLE
			elif v.has_pair('education', 'yes'):
				archetype = Trait.EDUCATION
			else:
				archetype = Trait.GENERAL
			g_traits[n.val] = Trait(n.val, archetype)

	with g_core_effect_path.open('w', encoding='cp1252', newline='\n') as f:
		print_file_header(f, 'ck2.scripted_effects')
		print_effect_remove_all_traits(f, loc)

	return 0
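
Example 1 relies on a Trait class that is not shown here. A minimal sketch of what it might look like, offered purely as an assumption based on how it is used above:

# Hypothetical sketch of the unshown Trait class: it only needs the archetype
# constants referenced above plus a per-instance name and archetype.
class Trait:
    LEADER, PERSONALITY, CHILDHOOD, LIFESTYLE, EDUCATION, GENERAL = range(6)

    def __init__(self, name, archetype):
        self.name = name            # trait codename, e.g. 'brave'
        self.archetype = archetype  # one of the class constants above
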
Example 2
def keys_overridden_in_mod(basedir, *moddirs):
    # Localisation keys defined by the base game.
    base_keys = set(get_localisation(basedir=basedir))
    seen = set()
    result = set()
    for path in files('localisation/*.csv', moddirs=moddirs, basedir=basedir):
        for key, *_ in csv_rows(path):
            if key not in seen:
                seen.add(key)
                # The first definition seen wins: if it comes from a mod file
                # (not under basedir) and the key also exists in the base game,
                # the mod overrides that key.
                if basedir not in path.parents and key in base_keys:
                    result.add(key)
    return result
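
A hedged usage sketch (the directories are placeholders): the function returns the set of localisation keys whose first-seen definition comes from a mod file but which also exist in the base game, i.e. the keys the mod overrides.

# Hypothetical call; the paths are placeholders, and get_localisation, files
# and csv_rows are assumed to be the ck2parser helpers used above.
import pathlib
vanilla_dir = pathlib.Path('/path/to/Crusader Kings II')  # placeholder
mod_dir = pathlib.Path('/path/to/mods/EMF')               # placeholder
overridden = keys_overridden_in_mod(vanilla_dir, mod_dir)
print(len(overridden), 'localisation keys overridden by the mod')
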
Example 3
def main():
    moddirs = (g_emf_path, )
    parser = ck2parser.SimpleParser(*moddirs)
    loc = ck2parser.get_localisation(moddirs=moddirs)
    for _, tree in parser.parse_files('common/traits/*.txt'):
        for n, v in tree:
            for n2, v2 in v:
                try:
                    if n2.val == 'inherit_chance':
                        loc_val = loc.get(n.val)
                        loc_val = ' # ' + loc_val if loc_val else ''
                        print('''\
if = {{
	limit = {{ trait = {0} }}{2}
	random = {{ chance = {1} PREV = {{ add_trait = {0} }} }}
}}'''.format(n.val, v2.val, loc_val))
                except Exception:
                    # Skip entries whose value is a block rather than a scalar
                    # and therefore has no usable .val to format.
                    pass
    return 0
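
The doubled braces in the template above are how str.format emits literal { and } into the generated CK2 script, while {0}, {1} and {2} are substituted as usual; a minimal illustration:

# {{ and }} are str.format escapes for literal braces; {0} and {1} are
# substituted normally.
snippet = 'random = {{ chance = {1} PREV = {{ add_trait = {0} }} }}'
print(snippet.format('brave', 50))
# -> random = { chance = 50 PREV = { add_trait = brave } }
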
Example 4
def main():
    moddirs = (g_emf_path, )
    parser = ck2parser.SimpleParser(*moddirs)
    loc = ck2parser.get_localisation(moddirs=moddirs)

    global g_traits
    g_traits = load_traits(parser, loc)

    with g_core_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_remove_all_traits(f)
        print_effect_remove_leader_traits(f)
        print_effect_remove_personality_traits(f)
        print_effect_remove_symptom_traits(f)
        print_effect_remove_illness_traits(f)
        print_effect_remove_epidemic_traits(f)
        print_effect_remove_incapacitating_traits(f)

    return 0
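
Several of these examples call a print_file_header helper that is not shown. Assuming it just writes the kind of "code-generated" warning banner that Example 5 (below) prints directly, a sketch might look like this (the exact banner text is a guess):

# Hypothetical sketch of the unshown helper: tag the generated file with its
# name and a warning banner like the one written out explicitly in Example 5.
def print_file_header(f, name):
    print('# {}'.format(name), file=f)
    print('#### WARNING! THIS FILE IS CODE-GENERATED. DO NOT MANUALLY MODIFY! ####',
          file=f)
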
Example 5
def main():
	for i, md in enumerate(mod_dirs):
		loc = ck2parser.get_localisation(moddirs=md)
		parser = ck2parser.SimpleParser(*md)
		cultures = load_cultures(parser, loc)
		new_loc = {}

		with trigger_paths[i].open('w', encoding='cp1252', newline='\n') as f:
			print_header(f, 'ck2.scripted_triggers')
			print_trigger_uses_founder_named_dynasties(f, cultures)

		with cloc_paths[i].open('w', encoding='cp1252', newline='\n') as f:
			print_header(f, 'ck2.custom_loc')
			print_cloc_GetFromDynastyPrefix(f, cultures, new_loc)
			print_cloc_GetFromDynastySuffix(f, cultures, new_loc)

		with loc_paths[i].open('w', encoding='cp1252', newline='\n') as f:
			print('#CODE;ENGLISH;FRENCH;GERMAN;;SPANISH;;;;;;;;;x', file=f)
			print('#### WARNING! THIS FILE IS CODE-GENERATED. DO NOT MANUALLY MODIFY! ####', file=f)
			for k in sorted(new_loc):
				print('{};{};;;;;;;;;;;;;x'.format(k, new_loc[k]), file=f)

	return 0
Example 6
def main():
    # fill titles before calling
    def should_override(key):
        title_match = re.match(r'[ekdcb]_((?!_adj($|_)).)*', key)
        if title_match is not None:
            title = title_match.group()
            return (title in titles and title[0] != 'b' and
                    re.fullmatch(r'c_((?!_adj($|_)).)*', key) is None)
        if key in keys_to_override:
            return True
        noble_match = re.fullmatch(noble_regex, key)
        return noble_match is not None

    def recurse(tree):
        for n, v in tree:
            if is_codename(n.val):
                titles.add(n.val)
                items = []
                for n2, v2 in v:
                    if n2.val in lt_keys:
                        if isinstance(v2, Obj):
                            value = ' '.join(s.val for s in v2)
                        else:
                            value = v2.val
                        items.append((n2.val, value))
                yield n.val, items
                yield from recurse(v)

    with tempfile.TemporaryDirectory() as td:
        parser = SimpleParser(strict=False)
        parser.moddirs = [swmhpath]
        prov_id, prov_title = get_province_id(parser)
        max_provs = get_max_provinces(parser)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        dynamics = get_dynamics(parser, cultures, prov_id)
        vanilla = get_localisation()
        swmh_loc = get_localisation(basedir=swmhpath)
        localisation = get_localisation([swmhpath])
        keys_to_override, keys_to_add, ul_titles = get_more_keys_to_override(
            parser, localisation, max_provs)
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        overridden_keys = set()
        titles = set()
        prev_loc = collections.defaultdict(str)
        prev_lt = collections.defaultdict(str)

        templates = rootpath / 'sed2/templates'
        templates_sed2 = templates / 'SED2'
        for path in files('localisation/*.csv', basedir=templates_sed2):
            prev_loc.update({row[0].strip(): row[1].strip()
                             for row in csv_rows(path)})
        for path in files('common/landed_titles/*.csv',
                          basedir=templates_sed2):
            prev_lt.update({(row[0].strip(), row[1].strip()): row[2].strip()
                            for row in csv_rows(path)})

        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
            religions + rel_groups, ul_titles, gov_prefixes)

        templates_t = pathlib.Path(td)
        templates_t_sed2 = templates_t / 'SED2'
        (templates_t_sed2 / 'localisation').mkdir(parents=True)
        (templates_t_sed2 / 'common/landed_titles').mkdir(parents=True)
        (templates_t / 'SED2+EMF/localisation').mkdir(parents=True)
        swmh_files = set()
        for inpath in files('localisation/*.csv', basedir=swmhpath):
            swmh_files.add(inpath.name)
            outpath = templates_t_sed2 / inpath.relative_to(swmhpath)
            out_rows = [
                ['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
            col_width = [0, 8]
            for row in csv_rows(inpath, comments=True):
                if not row[0].startswith('#'):
                    overridden_keys.add(row[0])
                if not row[0].startswith('b_'):
                    if row[0].startswith('#'):
                        row = [','.join(row), '']
                    else:
                        col_width[0] = max(len(row[0]), col_width[0])
                    out_row = [row[0],
                               prev_loc[row[0]],
                               row[1],
                               ','.join(dynamics[row[0]]),
                               vanilla.get(row[0], '')]
                    out_rows.append(out_row)
            for i, out_row in enumerate(out_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)

        lt_keys_not_cultures = [
            'title', 'title_female', 'foa', 'title_prefix', 'short_name',
            'name_tier', 'location_ruler_title', 'dynasty_title_names',
            'male_names']
        lt_keys = lt_keys_not_cultures + cultures

        for inpath, tree in parser.parse_files('common/landed_titles/*.txt',
                                               memcache=True):
            out_rows = [['#TITLE', 'KEY', 'SED2', 'SWMH']]
            col_width = [0, 0, 8]
            for title, pairs in recurse(tree):
                # here disabled for now: preservation of modifiers added to
                # template and not found in landed_titles (slow)
                # for (t, k), v in prev_lt.items():
                #     if t == title and not any(k == k2 for k2, _ in pairs):
                #         pairs.append((k, ''))
                # also disabled: barony stuff
                if not title.startswith('b_'):
                    for key, value in sorted(
                        pairs, key=lambda p: lt_keys.index(p[0])):
                        out_row = [title, key, prev_lt[title, key], value]
                        # don't allow changes to anything but dynamic names...
                        # just for now
                        if key in lt_keys_not_cultures:
                            out_row[2] = out_row[3]
                        out_rows.append(out_row)
                        for c in range(2):
                            col_width[c] = max(len(out_row[c]), col_width[c])
            for out_row in out_rows:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
            outpath = (templates_t_sed2 / inpath.with_suffix('.csv').
                       relative_to(inpath.parents[2]))
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)
            parser.flush(inpath)

        override_rows = [
            ['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add:
            out_row = [key, prev_loc[key], '', '', key]
            override_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv'):
            if path.name not in swmh_files:
                override_rows.append(['#' + path.name, '', '', '', ''])
                for row in csv_rows(path):
                    key, val = row[:2]
                    if should_override(key) and key not in overridden_keys:
                        out_row = [key,
                                   prev_loc[key],
                                   '',
                                   ','.join(dynamics[key]),
                                   val]
                        override_rows.append(out_row)
                        overridden_keys.add(key)
                        col_width[0] = max(len(key), col_width[0])
        for i, out_row in enumerate(override_rows):
            if not out_row[0].startswith('#') or i == 0:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
        outpath = templates_t_sed2 / 'localisation' / 'A_SED.csv'
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(override_rows)

        # EMF
        parser.moddirs.extend((emfpath, emfswmhpath))
        overridden_keys = set()
        loc_emf = get_localisation(parser.moddirs)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        keys_to_override, keys_to_add_emf, ul_titles = (
            get_more_keys_to_override(parser, loc_emf, max_provs))
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        keys_to_override.update(keys_overridden_in_mod(*parser.moddirs))
        keys_to_add_emf = [x for x in keys_to_add_emf if x not in keys_to_add]
        prev_loc_emf = collections.defaultdict(str)
        inpath = templates / 'SED2+EMF/localisation/0_SED+EMF.csv'
        prev_loc_emf.update({row[0].strip(): row[1].strip()
                             for row in csv_rows(inpath)})
        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
            religions + rel_groups, ul_titles, gov_prefixes)
        for _, tree in parser.parse_files('common/landed_titles/*.txt',
                                          emfpath, [emfswmhpath]):
            # iterate for side effects (add to titles)
            for _ in recurse(tree):
                pass
        emf_rows = [
            ['#CODE', 'SED+EMF', 'EMF', 'SWMH', 'OTHER', 'SED', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add_emf:
            out_row = [key, prev_loc_emf[key], key, '', '', '', '']
            emf_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv', [emfswmhpath], basedir=emfpath):
            emf_rows.append(['#' + path.name, '', '', '', '', ''])
            for row in csv_rows(path):
                key, val = row[:2]
                if should_override(key) and key not in overridden_keys:
                    out_row = [key,
                               prev_loc_emf[key],
                               val,
                               swmh_loc.get(key, ''),
                               ','.join(dynamics[key]),
                               prev_loc[key],
                               vanilla.get(key, '')]
                    emf_rows.append(out_row)
                    overridden_keys.add(key)
                    col_width[0] = max(len(key), col_width[0])
            for i, out_row in enumerate(emf_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
        outpath = templates_t / inpath.relative_to(templates)
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(emf_rows)

        while templates.exists():
            print('Removing old templates...')
            shutil.rmtree(str(templates), ignore_errors=True)
        shutil.copytree(str(templates_t), str(templates))
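
The csv.writer calls in this example pass dialect='ckii'. That dialect is not defined here; presumably ck2parser registers it for CK2's semicolon-delimited CSV files (compare the ';'-separated localisation headers elsewhere in these examples). A sketch of such a registration, offered only as an assumption about what the library does:

import csv

# Assumption: a semicolon-delimited dialect registered under the name 'ckii',
# matching the ';'-separated files these scripts write. This is a sketch, not
# ck2parser's actual registration code.
csv.register_dialect('ckii', delimiter=';', quoting=csv.QUOTE_NONE)
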
Example 7
def main():
    global g_religions, g_rg_religions_map

    # grab a list of religions & a map of religion_groups to their religions from the religions folder
    g_religions = []
    g_rg_religions_map = defaultdict(list)

    for _, tree in ck2parser.SimpleParser(emf_path).parse_files(
            'common/religions/*.txt'):
        for n, v in tree:
            if n.val.endswith('_trigger'):
                continue
            for n2, v2 in v:
                if isinstance(v2, ck2parser.Obj) and n2.val not in [
                        'color', 'male_names', 'female_names'
                ]:
                    if v2.has_pair('secret_religion', 'no'):
                        continue
                    g_religions.append(n2.val)
                    g_rg_religions_map[n.val].append(n2.val)

    # remove the old code-generated SR localisation file & then load all of localisation for vanilla & EMF
    if sr_localisation_path.exists():
        sr_localisation_path.unlink()
    loc = ck2parser.get_localisation(moddirs=(emf_path, ))
    new_loc = {}

    # create SR community event modifiers
    with sr_modifier_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.event_modifiers')
        print_modifiers_secret_community(f, loc, new_loc)

    # generate SR scripted triggers
    with sr_trigger_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_triggers')
        print_trigger_has_any_religion_char_flag(f)
        print_trigger_is_in_PREVs_interesting_society(f)
        print_trigger_has_any_char_old_religion(f)
        print_trigger_has_secret_community_of_ROOT(f)
        print_trigger_can_have_new_secret_community_of_FROM(f)
        print_trigger_has_not_religion_or_community_of_ROOT_sr(f)
        print_triggers_event_desc(f)
        print_triggers_does_cult_need_DLC(f)
        print_trigger_old_religion_is_liege_sr(f)

    # generate SR scripted effects
    with sr_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_set_sr_and_clr_religion_char_flag(f)
        print_effect_add_religion_char_flag(f)
        print_effect_clr_religion_char_flag(f)
        print_effect_event_target_old_religion_from_flag(f)
        print_effect_flip_secret_community_provinces(f)
        print_effect_flip_secret_community_provinces_of_PREV(f)
        print_effect_flip_secret_community_provinces_to_my_religion(f)
        print_effect_set_adopt_faith_flag_of_my_cult_on_ROOT(f)
        print_effect_adopt_faith_from_flag(f)
        print_effect_clr_adopt_faith_flag(f)
        print_effect_set_prov_flip_char_flag_of_my_cult_on_ROOT(f)
        print_effect_flip_secret_community_provinces_by_prov_flip_char_flag(f)
        print_effect_add_secret_community_to_target_province(f)
        print_effect_ai_try_to_join_society(f)

    # generate "secretly convert to this holy site's religion" decisions
    with sr_holy_site_decisions_path.open('w', encoding='cp1252',
                                          newline='\n') as f:
        print_file_header(f, 'ck2.decisions')
        print_decisions_secretly_convert_to_holy_site(f, loc, new_loc)

    with sr_custom_loc_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.custom_loc')
        print_custom_loc_GetTrueReligionAdherent(f, loc, new_loc)
        print_custom_loc_GetReligionAdherent(f, loc, new_loc)

    # write default SR localisation
    generate_default_sr_localisation(loc, new_loc)

    with sr_localisation_path.open('w', encoding='cp1252', newline='\n') as f:
        print('#CODE;ENGLISH;FRENCH;GERMAN;;SPANISH;;;;;;;;;x', file=f)
        for k in sorted(new_loc):
            print('{};{};;;;;;;;;;;;;x'.format(k, new_loc[k]), file=f)

    return 0
Example 8
def main():
    global g_religions, g_rg_religions_map, g_relhead_title_map

    # grab a list of religions & a map of religion_groups to their religions from the religions folder
    g_religions = []
    g_rg_religions_map = defaultdict(list)
    g_relhead_title_map = {}
    parser = ck2parser.SimpleParser(emf_path)

    for _, tree in parser.parse_files('common/religions/*.txt'):
        for n, v in tree:
            if n.val.endswith('_trigger'):
                continue
            for n2, v2 in v:
                if isinstance(v2, ck2parser.Obj) and n2.val not in [
                        'color', 'male_names', 'female_names',
                        'interface_skin', 'alternate_start'
                ]:
                    if v2.has_pair('secret_religion', 'no'):
                        continue
                    g_religions.append(n2.val)
                    g_rg_religions_map[n.val].append(n2.val)

    for _, tree in parser.parse_files('common/landed_titles/*.txt'):
        for n, v in tree:
            landless = False
            reformed = False
            if v.has_pair('landless', 'yes'):
                landless = True
            if re.search(r'_(pagan_)?reformed$', n.val):
                reformed = True
            for n2, v2 in v:
                if n2.val == 'controls_religion':
                    religion = v2.val
                    if religion not in g_religions and religion != 'hip_religion':
                        print("religion " + religion + " not recognized",
                              file=sys.stderr)
                    if religion in g_relhead_title_map:
                        g_relhead_title_map[religion].landless = landless
                    else:
                        g_relhead_title_map[religion] = RelHeadTitle(
                            n.val, religion, landless, reformed)

    assert g_rg_religions_map.get('pagan_group')

    # remove the old code-generated SR localisation file & then load all of localisation for vanilla & EMF
    if sr_localisation_path.exists():
        sr_localisation_path.unlink()
    loc = ck2parser.get_localisation(moddirs=(emf_path, ))
    new_loc = {}

    # create SR community event modifiers
    with sr_modifier_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.event_modifiers')
        print_modifiers_secret_community(f, loc, new_loc)

    # generate SR scripted triggers
    with sr_trigger_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_triggers')
        print_trigger_has_any_religion_char_flag(f)
        print_trigger_is_in_PREVs_interesting_society(f)
        print_trigger_has_any_char_old_religion(f)
        print_trigger_has_secret_community_of_ROOT(f)
        print_trigger_can_have_new_secret_community_of_FROM(f)
        print_trigger_has_not_religion_or_community_of_ROOT_sr(f)
        print_triggers_event_desc(f)
        print_triggers_does_cult_need_DLC(f)
        print_trigger_old_religion_is_liege_sr(f)

    # generate SR scripted effects
    with sr_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_set_sr_and_clr_religion_char_flag(f)
        print_effect_add_religion_char_flag(f)
        print_effect_clr_religion_char_flag(f)
        print_effect_event_target_old_religion_from_flag(f)
        print_effect_flip_secret_community_provinces(f)
        print_effect_flip_secret_community_provinces_of_PREV(f)
        print_effect_flip_secret_community_provinces_to_my_religion(f)
        print_effect_set_adopt_faith_flag_of_my_cult_on_ROOT(f)
        print_effect_adopt_faith_from_flag(f)
        print_effect_clr_adopt_faith_flag(f)
        print_effect_set_prov_flip_char_flag_of_my_cult_on_ROOT(f)
        print_effect_flip_secret_community_provinces_by_prov_flip_char_flag(f)
        print_effect_add_secret_community_to_target_province(f)
        print_effect_ai_try_to_join_society(f)

    # generate religion scripted effects
    with rel_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_calc_realm_province_religion_breakdown_of_THIS_for_ROOT(
            f, loc)

    # generate bloodline scripted triggers
    with bl_trigger_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_triggers')
        print_trigger_religion_same_as_bloodline_founder(f)

    # generate bloodline scripted effects
    with bl_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_set_bloodline_founder_religion_flag(f)

    # generate alt. start scripted effects
    with as_effect_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.scripted_effects')
        print_effect_randomize_relhead_title_names(f)
        print_effect_activate_randomized_relhead_titles(f, loc)

    # generate "secretly convert to this holy site's religion" decisions
    with sr_holy_site_decisions_path.open('w', encoding='cp1252',
                                          newline='\n') as f:
        print_file_header(f, 'ck2.decisions')
        print_decisions_secretly_convert_to_holy_site(f, loc, new_loc)

    with sr_custom_loc_path.open('w', encoding='cp1252', newline='\n') as f:
        print_file_header(f, 'ck2.custom_loc')
        print_custom_loc_GetTrueReligionAdjective(f, loc, new_loc)
        print_custom_loc_GetTrueReligionAdherent(f, loc, new_loc)
        print_custom_loc_GetReligionAdherent(f, loc, new_loc)

    # write default SR localisation
    generate_default_sr_localisation(loc, new_loc)

    with sr_localisation_path.open('w', encoding='cp1252', newline='\n') as f:
        print('#CODE;ENGLISH;FRENCH;GERMAN;;SPANISH;;;;;;;;;x', file=f)
        for k in sorted(new_loc):
            print('{};{};;;;;;;;;;;;;x'.format(k, new_loc[k]), file=f)

    return 0
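
The example above builds RelHeadTitle(n.val, religion, landless, reformed) records and later reassigns .landless, so the class is presumably a small mutable container; a hypothetical sketch:

# Hypothetical sketch of the unshown RelHeadTitle record. Field names follow
# the constructor call above; it must be mutable because .landless is
# reassigned after construction.
class RelHeadTitle:
    def __init__(self, title, religion, landless, reformed):
        self.title = title        # landed-title codename controlling the religion
        self.religion = religion  # religion controlled by the title
        self.landless = landless  # True if the title has landless = yes
        self.reformed = reformed  # True for *_reformed religion-head titles
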
Example 9
def main():
    # fill titles before calling
    def should_override(key):
        title_match = re.match(r'[ekdcb]_((?!_adj($|_)).)*', key)
        if title_match is not None:
            title = title_match.group()
            return (title in titles and title[0] != 'b'
                    and re.fullmatch(r'c_((?!_adj($|_)).)*', key) is None)
        if key in keys_to_override:
            return True
        noble_match = re.fullmatch(noble_regex, key)
        return noble_match is not None

    def recurse(tree):
        for n, v in tree:
            if is_codename(n.val):
                titles.add(n.val)
                items = []
                for n2, v2 in v:
                    if n2.val in lt_keys:
                        if isinstance(v2, Obj):
                            value = ' '.join(s.val for s in v2)
                        else:
                            value = v2.val
                        items.append((n2.val, value))
                yield n.val, items
                yield from recurse(v)

    with tempfile.TemporaryDirectory() as td:
        parser = SimpleParser(strict=False)
        parser.moddirs = [swmhpath]
        prov_id, prov_title = get_province_id(parser)
        max_provs = get_max_provinces(parser)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        dynamics = get_dynamics(parser, cultures, prov_id)
        vanilla = get_localisation()
        swmh_loc = get_localisation(basedir=swmhpath)
        localisation = get_localisation([swmhpath])
        keys_to_override, keys_to_add, ul_titles = get_more_keys_to_override(
            parser, localisation, max_provs)
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        overridden_keys = set()
        titles = set()
        prev_loc = collections.defaultdict(str)
        prev_lt = collections.defaultdict(str)

        templates = rootpath / 'sed2/templates'
        templates_sed2 = templates / 'SED2'
        for path in files('localisation/*.csv', basedir=templates_sed2):
            prev_loc.update(
                {row[0].strip(): row[1].strip()
                 for row in csv_rows(path)})
        for path in files('common/landed_titles/*.csv',
                          basedir=templates_sed2):
            prev_lt.update({(row[0].strip(), row[1].strip()): row[2].strip()
                            for row in csv_rows(path)})

        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
                                             religions + rel_groups, ul_titles,
                                             gov_prefixes)

        templates_t = pathlib.Path(td)
        templates_t_sed2 = templates_t / 'SED2'
        (templates_t_sed2 / 'localisation').mkdir(parents=True)
        (templates_t_sed2 / 'common/landed_titles').mkdir(parents=True)
        (templates_t / 'SED2+EMF/localisation').mkdir(parents=True)
        swmh_files = set()
        for inpath in files('localisation/*.csv', basedir=swmhpath):
            swmh_files.add(inpath.name)
            outpath = templates_t_sed2 / inpath.relative_to(swmhpath)
            out_rows = [['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
            col_width = [0, 8]
            for row in csv_rows(inpath, comments=True):
                if not row[0].startswith('#'):
                    overridden_keys.add(row[0])
                if not row[0].startswith('b_'):
                    if row[0].startswith('#'):
                        row = [','.join(row), '']
                    else:
                        col_width[0] = max(len(row[0]), col_width[0])
                    out_row = [
                        row[0], prev_loc[row[0]], row[1],
                        ','.join(dynamics[row[0]]),
                        vanilla.get(row[0], '')
                    ]
                    out_rows.append(out_row)
            for i, out_row in enumerate(out_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)

        lt_keys_not_cultures = [
            'title', 'title_female', 'foa', 'title_prefix', 'short_name',
            'name_tier', 'location_ruler_title', 'dynasty_title_names',
            'male_names'
        ]
        lt_keys = lt_keys_not_cultures + cultures

        for inpath, tree in parser.parse_files('common/landed_titles/*.txt',
                                               memcache=True):
            out_rows = [['#TITLE', 'KEY', 'SED2', 'SWMH']]
            col_width = [0, 0, 8]
            for title, pairs in recurse(tree):
                # here disabled for now: preservation of modifiers added to
                # template and not found in landed_titles (slow)
                # for (t, k), v in prev_lt.items():
                #     if t == title and not any(k == k2 for k2, _ in pairs):
                #         pairs.append((k, ''))
                # also disabled: barony stuff
                if not title.startswith('b_'):
                    for key, value in sorted(
                            pairs, key=lambda p: lt_keys.index(p[0])):
                        out_row = [title, key, prev_lt[title, key], value]
                        # don't allow changes to anything but dynamic names...
                        # just for now
                        if key in lt_keys_not_cultures:
                            out_row[2] = out_row[3]
                        out_rows.append(out_row)
                        for c in range(2):
                            col_width[c] = max(len(out_row[c]), col_width[c])
            for out_row in out_rows:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
            outpath = (
                templates_t_sed2 /
                inpath.with_suffix('.csv').relative_to(inpath.parents[2]))
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)
            parser.flush(inpath)

        override_rows = [['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add:
            out_row = [key, prev_loc[key], '', '', key]
            override_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv'):
            if path.name not in swmh_files:
                override_rows.append(['#' + path.name, '', '', '', ''])
                for row in csv_rows(path):
                    key, val = row[:2]
                    if should_override(key) and key not in overridden_keys:
                        out_row = [
                            key, prev_loc[key], '', ','.join(dynamics[key]),
                            val
                        ]
                        override_rows.append(out_row)
                        overridden_keys.add(key)
                        col_width[0] = max(len(key), col_width[0])
        for i, out_row in enumerate(override_rows):
            if not out_row[0].startswith('#') or i == 0:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
        outpath = templates_t_sed2 / 'localisation' / 'A_SED.csv'
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(override_rows)

        # EMF
        parser.moddirs.extend((emfpath, emfswmhpath))
        overridden_keys = set()
        loc_emf = get_localisation(parser.moddirs)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        keys_to_override, keys_to_add_emf, ul_titles = (
            get_more_keys_to_override(parser, loc_emf, max_provs))
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        keys_to_override.update(keys_overridden_in_mod(*parser.moddirs))
        keys_to_add_emf = [x for x in keys_to_add_emf if x not in keys_to_add]
        prev_loc_emf = collections.defaultdict(str)
        inpath = templates / 'SED2+EMF/localisation/0_SED+EMF.csv'
        prev_loc_emf.update(
            {row[0].strip(): row[1].strip()
             for row in csv_rows(inpath)})
        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
                                             religions + rel_groups, ul_titles,
                                             gov_prefixes)
        for _, tree in parser.parse_files('common/landed_titles/*.txt',
                                          emfpath, [emfswmhpath]):
            # iterate for side effects (add to titles)
            for _ in recurse(tree):
                pass
        emf_rows = [[
            '#CODE', 'SED+EMF', 'EMF', 'SWMH', 'OTHER', 'SED', 'VANILLA'
        ]]
        col_width = [0, 8]
        for key in keys_to_add_emf:
            out_row = [key, prev_loc_emf[key], key, '', '', '', '']
            emf_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv', [emfswmhpath],
                          basedir=emfpath):
            emf_rows.append(['#' + path.name, '', '', '', '', ''])
            for row in csv_rows(path):
                key, val = row[:2]
                if should_override(key) and key not in overridden_keys:
                    out_row = [
                        key, prev_loc_emf[key], val,
                        swmh_loc.get(key, ''), ','.join(dynamics[key]),
                        prev_loc[key],
                        vanilla.get(key, '')
                    ]
                    emf_rows.append(out_row)
                    overridden_keys.add(key)
                    col_width[0] = max(len(key), col_width[0])
            for i, out_row in enumerate(emf_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
        outpath = templates_t / inpath.relative_to(templates)
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(emf_rows)

        while templates.exists():
            print('Removing old templates...')
            shutil.rmtree(str(templates), ignore_errors=True)
        shutil.copytree(str(templates_t), str(templates))
Example 10
def main():
    parser = ck2parser.SimpleParser(ck2parser.rootpath / 'SWMH-BETA/SWMH')
    ck2localize = ck2parser.get_localisation(parser.moddirs)
    title_key = {
        title: 'PROV{}'.format(prov)
        for prov, title, _ in ck2parser.get_provinces(parser)
    }
    parser.moddirs = []

    eu4root = pathlib.Path(
        '/cygdrive/c/SteamLibrary/steamapps/common/Europa Universalis IV')
    localize = {}
    for path in (eu4root / 'localisation').glob('*_l_english.yml'):
        with path.open(encoding='utf-8-sig') as f:
            for line in f:
                match = re.fullmatch(r'\s*([^#\s:]+):\d?\s*"(.*)"[^"]*', line)
                if match:
                    localize[match.group(1)] = match.group(2)
    areas = {}
    regions = collections.OrderedDict()
    superregions = collections.OrderedDict()
    for n, v in parser.parse_file(eu4root / 'map/area.txt'):
        areas[n.val] = [v2.val for v2 in v]
    with (eu4root / 'map/region.txt').open(encoding='cp1252') as f:
        # klugey
        region_text = f.read().split('#Sea Regions', 1)[0]
    for n, v in parser.parse(region_text):
        if v.contents:
            regions[n.val] = [v3.val for _, v2 in v for v3 in v2]
    for n, v in parser.parse_file(eu4root / 'map/superregion.txt'):
        if v.contents:
            superregions[n.val] = [v2.val for v2 in v]
    orphan_regions = [
        rn for rn, rv in regions.items()
        if not any(rn in srv for srv in superregions.values())
    ]
    history = {}
    for path in (eu4root / 'history/provinces').iterdir():
        num = int(re.match(r'\d+', path.stem).group())
        history[num] = path.stem
    names = collections.defaultdict(list)
    for path in (eu4root / 'common/province_names').iterdir():
        with path.open() as f:
            for line in f:
                match = re.fullmatch(r' *(\d+) *= *"([^"]*)"\n?', line)
                if match and match.group(2) not in names[int(match.group(1))]:
                    names[int(match.group(1))].append(match.group(2))
    for n, v in localize.items():
        if re.fullmatch(r'PROV\d+', n):
            num = int(n[4:])
            if v in names[num]:
                names[num].remove(v)
            names[num].insert(0, v)

    # count = 0
    off_map = [set(), set(), set(), set()]
    prov_mappings = collections.defaultdict(list)
    try:
        with open('province_table_new.csv', encoding='cp1252') as f:
            for line in f:
                match = re.match(r'([^#;]*);([^#;]*)', line)
                if match:
                    prov_mappings[int(match.group(2))].append(match.group(1))
                else:
                    match = re.match(r'(#+) ([^;]*) \(off-map\)', line)
                    if match:
                        off_map[len(match.group(1)) - 1].add(match.group(2))
    except FileNotFoundError:
        pass

    prev = None

    def write_line(line):
        nonlocal prev
        cur = line.split(';')[1] if ';' in line else '#'
        if prev != cur:
            print(file=f)
        print(line, file=f)
        prev = cur

    def write_region(region_name):
        if localize[region_name] in off_map[1]:
            write_line('## {} (off-map)'.format(localize[region_name]))
            return
        write_line('## {}'.format(localize[region_name]))
        for area_name in regions[region_name]:
            if localize[area_name] in off_map[2]:
                write_line('### {} (off-map)'.format(localize[area_name]))
                continue
            write_line('### {}'.format(localize[area_name]))
            for province in areas[area_name]:
                if names[province][0] in off_map[3]:
                    write_line('#### {} (off-map)'.format(names[province][0]))
                    continue
                if prov_mappings[province]:
                    seen_title = set()
                    for ck2title in prov_mappings[province]:
                        if ck2title in seen_title:
                            comment = 'Duplicate to increase weight'
                        else:
                            comment = ''
                            if ck2title.startswith('d_'):
                                comment += 'Duchy ' + ck2localize[ck2title]
                            else:
                                comment += ck2localize[title_key[ck2title]]
                            comment += ' - ' + names[province][0]
                            seen_title.add(ck2title)
                        write_line('{};{};{};{}'.format(
                            ck2title, province, history[province], comment))
                else:
                    write_line('####;{};{};{}'.format(
                        province, history[province],
                        '/'.join(names[province])))

    with open('province_table_new.csv', 'w', encoding='cp1252',
              newline='\r\n') as f:
        print('# CK2TITLE;EU4ID;Filename;Comment', file=f)
        for region_name in orphan_regions:
            write_region(region_name)
        for superregion_name, superregion in superregions.items():
            if localize[superregion_name] in off_map[0]:
                write_line('# {} (off-map)'.format(localize[superregion_name]))
                continue
            write_line('# {}'.format(localize[superregion_name]))
            for region_name in superregion:
                write_region(region_name)