예제 #1
0
파일: make_csvs.py 프로젝트: escalonn/sed2
def keys_overridden_in_mod(basedir, *moddirs):
    """Return base-game localisation keys that the mods redefine.

    A key counts as overridden when its first (winning) definition comes
    from a file outside *basedir* while the key also exists in basedir's
    own localisation.
    """
    base_keys = set(get_localisation(basedir=basedir))
    processed = set()
    overridden = set()
    for csv_path in files('localisation/*.csv', moddirs=moddirs,
                          basedir=basedir):
        # A path not under basedir belongs to one of the mods.
        from_mod = basedir not in csv_path.parents
        for row in csv_rows(csv_path):
            key = row[0]
            if key in processed:
                continue
            processed.add(key)
            if from_mod and key in base_keys:
                overridden.add(key)
    return overridden
예제 #2
0
def keys_overridden_in_mod(basedir, *moddirs):
    """Identify basedir localisation keys whose winning definition is a mod's.

    First definition of each key wins; it is reported only when it comes
    from outside basedir yet the key exists in basedir's localisation.
    """
    base_keys = set(get_localisation(basedir=basedir))
    seen, result = set(), set()
    for path in files('localisation/*.csv', moddirs=moddirs, basedir=basedir):
        is_base_file = basedir in path.parents
        # Keys first defined in this file (duplicates within the file are
        # harmless: result is a set).
        new_keys = [row[0] for row in csv_rows(path) if row[0] not in seen]
        seen.update(new_keys)
        if not is_base_file:
            result.update(k for k in new_keys if k in base_keys)
    return result
예제 #3
0
파일: make_csvs.py 프로젝트: escalonn/SLD
def process_provinces(parser, default_tree):
    """Map 'PROV<id>' localisation keys to province holder titles.

    Reads the map definitions CSV referenced by *default_tree* for
    id -> name pairs, then scans province history files whose stems are
    '<id> - <name>' and records each matching province's 'title' field.
    """
    id_name_map = {}
    definitions = parser.file('map/' + default_tree['definitions'].val)
    for row in ck2parser.csv_rows(definitions):
        # Rows too short to carry a name (column 4) are skipped.
        if len(row) > 4:
            id_name_map[row[0]] = row[4]
    prov_title = {}
    for path in parser.files('history/provinces/* - *.txt'):
        number, name = path.stem.split(' - ')
        # Only trust files whose stem agrees with the definitions CSV.
        if id_name_map.get(number) != name:
            continue
        tree = parser.parse_file(path)
        try:
            prov_title['PROV{}'.format(number)] = tree['title'].val
        except KeyError:
            # Province history without a 'title' entry: nothing to record.
            pass
    return prov_title
예제 #4
0
파일: make_csvs.py 프로젝트: escalonn/SLD
def process_localisation(parser, title_attrs, prov_title):
    """Route localisation rows to per-title attribute dicts.

    Iterates localisation CSVs (reverse file order, first occurrence of
    each key wins). Title keys ('e_/k_/d_/c_/b_' prefixed, with '_adj'
    suffixes stripped) and 'PROV<n>' keys with a known title are stored
    into title_attrs[title][key]; 'PROV<n>' keys with no known title are
    collected into the returned dict.

    :param parser: parser exposing .files() over the mod's file tree
    :param title_attrs: dict of title codename -> {loc key: value};
        updated in place (titles missing from it are silently skipped)
    :param prov_title: dict mapping 'PROV<n>' keys to title codenames
    :return: dict of unmatched 'PROV<n>' keys to their values
    """
    other_locs = {}
    seen = set()
    for path in parser.files('localisation/*', reverse=True):
        for row in ck2parser.csv_rows(path):
            key, value = row[0:2]
            if key in seen:
                continue
            seen.add(key)
            # Raw strings for the regexes: '\d' in a plain string is an
            # invalid escape sequence (SyntaxWarning on modern CPython).
            if re.match(r'[ekdcb]_', key):
                # 'x_foo_adj' and 'x_foo_adj_bar' both map to 'x_foo'.
                adj_match = re.match(r'(.+)_adj(_|$)', key)
                title = key if not adj_match else adj_match.group(1)
            elif re.match(r'PROV\d+', key):
                if key in prov_title:
                    title = prov_title[key]
                else:
                    other_locs[key] = value
                    continue
            else:
                # Not a title-related key; ignore.
                continue
            try:
                title_attrs[title][key] = value
            except KeyError:
                # Title not tracked in title_attrs.
                pass
    return other_locs
예제 #5
0
파일: make_csvs.py 프로젝트: escalonn/sed2
def main():
    """Rebuild the SED2 (and SED2+EMF) CSV templates.

    Reads SWMH/vanilla localisation and landed_titles, merges in the
    previous template contents, writes fresh templates into a temporary
    directory, and finally replaces rootpath/'sed2/templates' with it.
    """
    # fill titles before calling
    def should_override(key):
        # True when this loc key warrants an SED override row: a
        # non-barony, non-plain-county title codename present in
        # `titles`, a key collected in keys_to_override, or a match of
        # the generated noble-title regex.
        title_match = re.match(r'[ekdcb]_((?!_adj($|_)).)*', key)
        if title_match is not None:
            title = title_match.group()
            return (title in titles and title[0] != 'b' and
                    re.fullmatch(r'c_((?!_adj($|_)).)*', key) is None)
        if key in keys_to_override:
            return True
        noble_match = re.fullmatch(noble_regex, key)
        return noble_match is not None

    def recurse(tree):
        # Yield (codename, [(key, value), ...]) for every title in the
        # tree, depth-first; also records each codename in `titles`.
        for n, v in tree:
            if is_codename(n.val):
                titles.add(n.val)
                items = []
                for n2, v2 in v:
                    if n2.val in lt_keys:
                        if isinstance(v2, Obj):
                            value = ' '.join(s.val for s in v2)
                        else:
                            value = v2.val
                        items.append((n2.val, value))
                yield n.val, items
                yield from recurse(v)

    with tempfile.TemporaryDirectory() as td:
        parser = SimpleParser(strict=False)
        parser.moddirs = [swmhpath]
        prov_id, prov_title = get_province_id(parser)
        max_provs = get_max_provinces(parser)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        dynamics = get_dynamics(parser, cultures, prov_id)
        vanilla = get_localisation()
        swmh_loc = get_localisation(basedir=swmhpath)
        localisation = get_localisation([swmhpath])
        keys_to_override, keys_to_add, ul_titles = get_more_keys_to_override(
            parser, localisation, max_provs)
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        overridden_keys = set()
        titles = set()
        # Previous template contents, so existing SED entries survive.
        prev_loc = collections.defaultdict(str)
        prev_lt = collections.defaultdict(str)

        templates = rootpath / 'sed2/templates'
        templates_sed2 = templates / 'SED2'
        for path in files('localisation/*.csv', basedir=templates_sed2):
            prev_loc.update({row[0].strip(): row[1].strip()
                             for row in csv_rows(path)})
        for path in files('common/landed_titles/*.csv',
                          basedir=templates_sed2):
            prev_lt.update({(row[0].strip(), row[1].strip()): row[2].strip()
                            for row in csv_rows(path)})

        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
            religions + rel_groups, ul_titles, gov_prefixes)

        # Build the new templates under the temp dir; swapped in at the end.
        templates_t = pathlib.Path(td)
        templates_t_sed2 = templates_t / 'SED2'
        (templates_t_sed2 / 'localisation').mkdir(parents=True)
        (templates_t_sed2 / 'common/landed_titles').mkdir(parents=True)
        (templates_t / 'SED2+EMF/localisation').mkdir(parents=True)
        swmh_files = set()
        for inpath in files('localisation/*.csv', basedir=swmhpath):
            swmh_files.add(inpath.name)
            outpath = templates_t_sed2 / inpath.relative_to(swmhpath)
            out_rows = [
                ['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
            col_width = [0, 8]
            for row in csv_rows(inpath, comments=True):
                if not row[0].startswith('#'):
                    overridden_keys.add(row[0])
                # Barony keys are skipped entirely.
                if not row[0].startswith('b_'):
                    if row[0].startswith('#'):
                        # Keep comment lines intact in column 0.
                        row = [','.join(row), '']
                    else:
                        col_width[0] = max(len(row[0]), col_width[0])
                    out_row = [row[0],
                               prev_loc[row[0]],
                               row[1],
                               ','.join(dynamics[row[0]]),
                               vanilla.get(row[0], '')]
                    out_rows.append(out_row)
            # Pad columns for alignment (header row is padded too).
            for i, out_row in enumerate(out_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)

        lt_keys_not_cultures = [
            'title', 'title_female', 'foa', 'title_prefix', 'short_name',
            'name_tier', 'location_ruler_title', 'dynasty_title_names',
            'male_names']
        lt_keys = lt_keys_not_cultures + cultures

        for inpath, tree in parser.parse_files('common/landed_titles/*.txt',
                                               memcache=True):
            out_rows = [['#TITLE', 'KEY', 'SED2', 'SWMH']]
            col_width = [0, 0, 8]
            for title, pairs in recurse(tree):
                # here disabled for now: preservation of modifiers added to
                # template and not found in landed_titles (slow)
                # for (t, k), v in prev_lt.items():
                #     if t == title and not any(k == k2 for k2, _ in pairs):
                #         pairs.append((k, ''))
                # also disabled: barony stuff
                if not title.startswith('b_'):
                    for key, value in sorted(
                        pairs, key=lambda p: lt_keys.index(p[0])):
                        out_row = [title, key, prev_lt[title, key], value]
                        # don't allow changes to anything but dynamic names...
                        # just for now
                        if key in lt_keys_not_cultures:
                            out_row[2] = out_row[3]
                        out_rows.append(out_row)
                        for c in range(2):
                            col_width[c] = max(len(out_row[c]), col_width[c])
            for out_row in out_rows:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
            outpath = (templates_t_sed2 / inpath.with_suffix('.csv').
                       relative_to(inpath.parents[2]))
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)
            parser.flush(inpath)

        # Keys to override that were not in any SWMH file go into A_SED.csv.
        override_rows = [
            ['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add:
            out_row = [key, prev_loc[key], '', '', key]
            override_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv'):
            if path.name not in swmh_files:
                override_rows.append(['#' + path.name, '', '', '', ''])
                for row in csv_rows(path):
                    key, val = row[:2]
                    if should_override(key) and key not in overridden_keys:
                        out_row = [key,
                                   prev_loc[key],
                                   '',
                                   ','.join(dynamics[key]),
                                   val]
                        override_rows.append(out_row)
                        overridden_keys.add(key)
                        col_width[0] = max(len(key), col_width[0])
        for i, out_row in enumerate(override_rows):
            if not out_row[0].startswith('#') or i == 0:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
        outpath = templates_t_sed2 / 'localisation' / 'A_SED.csv'
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(override_rows)

        # EMF
        parser.moddirs.extend((emfpath, emfswmhpath))
        overridden_keys = set()
        loc_emf = get_localisation(parser.moddirs)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        keys_to_override, keys_to_add_emf, ul_titles = (
            get_more_keys_to_override(parser, loc_emf, max_provs))
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        keys_to_override.update(keys_overridden_in_mod(*parser.moddirs))
        keys_to_add_emf = [x for x in keys_to_add_emf if x not in keys_to_add]
        prev_loc_emf = collections.defaultdict(str)
        inpath = templates / 'SED2+EMF/localisation/0_SED+EMF.csv'
        prev_loc_emf.update({row[0].strip(): row[1].strip()
                             for row in csv_rows(inpath)})
        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
            religions + rel_groups, ul_titles, gov_prefixes)
        for _, tree in parser.parse_files('common/landed_titles/*.txt',
                                          emfpath, [emfswmhpath]):
            # iterate for side effects (add to titles)
            for _ in recurse(tree):
                pass
        emf_rows = [
            ['#CODE', 'SED+EMF', 'EMF', 'SWMH', 'OTHER', 'SED', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add_emf:
            out_row = [key, prev_loc_emf[key], key, '', '', '', '']
            emf_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv', [emfswmhpath], basedir=emfpath):
            # NOTE(review): 6 cells here vs. the 7-column header above —
            # looks like a missing '' ; confirm intended.
            emf_rows.append(['#' + path.name, '', '', '', '', ''])
            for row in csv_rows(path):
                key, val = row[:2]
                if should_override(key) and key not in overridden_keys:
                    out_row = [key,
                               prev_loc_emf[key],
                               val,
                               swmh_loc.get(key, ''),
                               ','.join(dynamics[key]),
                               prev_loc[key],
                               vanilla.get(key, '')]
                    emf_rows.append(out_row)
                    overridden_keys.add(key)
                    col_width[0] = max(len(key), col_width[0])
            # NOTE(review): this padding pass runs once per input file,
            # re-padding all earlier rows (harmless since ljust composes,
            # but redundant; the analogous pass above sits outside the
            # file loop) — confirm the indentation is intended.
            for i, out_row in enumerate(emf_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
        outpath = templates_t / inpath.relative_to(templates)
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(emf_rows)

        # Retry removal in a loop: rmtree(ignore_errors=True) may fail to
        # fully delete (e.g. files transiently locked on Windows).
        while templates.exists():
            print('Removing old templates...')
            shutil.rmtree(str(templates), ignore_errors=True)
        shutil.copytree(str(templates_t), str(templates))
예제 #6
0
def main():
    """Regenerate the SED2 and SED2+EMF CSV templates from SWMH/vanilla
    sources, building them under a temporary directory and then swapping
    the result into rootpath/'sed2/templates'.
    """
    # fill titles before calling
    def should_override(key):
        # True when this loc key warrants an SED override row: a
        # non-barony, non-plain-county title codename present in
        # `titles`, a key collected in keys_to_override, or a match of
        # the generated noble-title regex.
        title_match = re.match(r'[ekdcb]_((?!_adj($|_)).)*', key)
        if title_match is not None:
            title = title_match.group()
            return (title in titles and title[0] != 'b'
                    and re.fullmatch(r'c_((?!_adj($|_)).)*', key) is None)
        if key in keys_to_override:
            return True
        noble_match = re.fullmatch(noble_regex, key)
        return noble_match is not None

    def recurse(tree):
        # Yield (codename, [(key, value), ...]) for each title in the
        # tree, depth-first; side effect: records codenames in `titles`.
        for n, v in tree:
            if is_codename(n.val):
                titles.add(n.val)
                items = []
                for n2, v2 in v:
                    if n2.val in lt_keys:
                        if isinstance(v2, Obj):
                            value = ' '.join(s.val for s in v2)
                        else:
                            value = v2.val
                        items.append((n2.val, value))
                yield n.val, items
                yield from recurse(v)

    with tempfile.TemporaryDirectory() as td:
        parser = SimpleParser(strict=False)
        parser.moddirs = [swmhpath]
        prov_id, prov_title = get_province_id(parser)
        max_provs = get_max_provinces(parser)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        dynamics = get_dynamics(parser, cultures, prov_id)
        vanilla = get_localisation()
        swmh_loc = get_localisation(basedir=swmhpath)
        localisation = get_localisation([swmhpath])
        keys_to_override, keys_to_add, ul_titles = get_more_keys_to_override(
            parser, localisation, max_provs)
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        overridden_keys = set()
        titles = set()
        # Previous template contents, so existing SED entries survive.
        prev_loc = collections.defaultdict(str)
        prev_lt = collections.defaultdict(str)

        templates = rootpath / 'sed2/templates'
        templates_sed2 = templates / 'SED2'
        for path in files('localisation/*.csv', basedir=templates_sed2):
            prev_loc.update(
                {row[0].strip(): row[1].strip()
                 for row in csv_rows(path)})
        for path in files('common/landed_titles/*.csv',
                          basedir=templates_sed2):
            prev_lt.update({(row[0].strip(), row[1].strip()): row[2].strip()
                            for row in csv_rows(path)})

        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
                                             religions + rel_groups, ul_titles,
                                             gov_prefixes)

        # Build the new templates under the temp dir; swapped in at the end.
        templates_t = pathlib.Path(td)
        templates_t_sed2 = templates_t / 'SED2'
        (templates_t_sed2 / 'localisation').mkdir(parents=True)
        (templates_t_sed2 / 'common/landed_titles').mkdir(parents=True)
        (templates_t / 'SED2+EMF/localisation').mkdir(parents=True)
        swmh_files = set()
        for inpath in files('localisation/*.csv', basedir=swmhpath):
            swmh_files.add(inpath.name)
            outpath = templates_t_sed2 / inpath.relative_to(swmhpath)
            out_rows = [['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
            col_width = [0, 8]
            for row in csv_rows(inpath, comments=True):
                if not row[0].startswith('#'):
                    overridden_keys.add(row[0])
                # Barony keys are skipped entirely.
                if not row[0].startswith('b_'):
                    if row[0].startswith('#'):
                        # Keep comment lines intact in column 0.
                        row = [','.join(row), '']
                    else:
                        col_width[0] = max(len(row[0]), col_width[0])
                    out_row = [
                        row[0], prev_loc[row[0]], row[1],
                        ','.join(dynamics[row[0]]),
                        vanilla.get(row[0], '')
                    ]
                    out_rows.append(out_row)
            # Pad columns for alignment (header row is padded too).
            for i, out_row in enumerate(out_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)

        lt_keys_not_cultures = [
            'title', 'title_female', 'foa', 'title_prefix', 'short_name',
            'name_tier', 'location_ruler_title', 'dynasty_title_names',
            'male_names'
        ]
        lt_keys = lt_keys_not_cultures + cultures

        for inpath, tree in parser.parse_files('common/landed_titles/*.txt',
                                               memcache=True):
            out_rows = [['#TITLE', 'KEY', 'SED2', 'SWMH']]
            col_width = [0, 0, 8]
            for title, pairs in recurse(tree):
                # here disabled for now: preservation of modifiers added to
                # template and not found in landed_titles (slow)
                # for (t, k), v in prev_lt.items():
                #     if t == title and not any(k == k2 for k2, _ in pairs):
                #         pairs.append((k, ''))
                # also disabled: barony stuff
                if not title.startswith('b_'):
                    for key, value in sorted(
                            pairs, key=lambda p: lt_keys.index(p[0])):
                        out_row = [title, key, prev_lt[title, key], value]
                        # don't allow changes to anything but dynamic names...
                        # just for now
                        if key in lt_keys_not_cultures:
                            out_row[2] = out_row[3]
                        out_rows.append(out_row)
                        for c in range(2):
                            col_width[c] = max(len(out_row[c]), col_width[c])
            for out_row in out_rows:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
            outpath = (
                templates_t_sed2 /
                inpath.with_suffix('.csv').relative_to(inpath.parents[2]))
            with outpath.open('w', newline='', encoding='cp1252') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(out_rows)
            parser.flush(inpath)

        # Keys to override that were not in any SWMH file go into A_SED.csv.
        override_rows = [['#CODE', 'SED', 'SWMH', 'OTHER', 'VANILLA']]
        col_width = [0, 8]
        for key in keys_to_add:
            out_row = [key, prev_loc[key], '', '', key]
            override_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv'):
            if path.name not in swmh_files:
                override_rows.append(['#' + path.name, '', '', '', ''])
                for row in csv_rows(path):
                    key, val = row[:2]
                    if should_override(key) and key not in overridden_keys:
                        out_row = [
                            key, prev_loc[key], '', ','.join(dynamics[key]),
                            val
                        ]
                        override_rows.append(out_row)
                        overridden_keys.add(key)
                        col_width[0] = max(len(key), col_width[0])
        for i, out_row in enumerate(override_rows):
            if not out_row[0].startswith('#') or i == 0:
                for col, width in enumerate(col_width):
                    out_row[col] = out_row[col].ljust(width)
        outpath = templates_t_sed2 / 'localisation' / 'A_SED.csv'
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(override_rows)

        # EMF
        parser.moddirs.extend((emfpath, emfswmhpath))
        overridden_keys = set()
        loc_emf = get_localisation(parser.moddirs)
        cultures, cult_groups = get_cultures(parser)
        religions, rel_groups = get_religions(parser)
        keys_to_override, keys_to_add_emf, ul_titles = (
            get_more_keys_to_override(parser, loc_emf, max_provs))
        keys_to_override.update(cultures, cult_groups, religions, rel_groups)
        keys_to_override.update(keys_overridden_in_mod(*parser.moddirs))
        keys_to_add_emf = [x for x in keys_to_add_emf if x not in keys_to_add]
        prev_loc_emf = collections.defaultdict(str)
        inpath = templates / 'SED2+EMF/localisation/0_SED+EMF.csv'
        prev_loc_emf.update(
            {row[0].strip(): row[1].strip()
             for row in csv_rows(inpath)})
        gov_prefixes, gov_names = get_gov_locs(parser)
        keys_to_override.update(gov_names)
        noble_regex = make_noble_title_regex(cultures + cult_groups,
                                             religions + rel_groups, ul_titles,
                                             gov_prefixes)
        for _, tree in parser.parse_files('common/landed_titles/*.txt',
                                          emfpath, [emfswmhpath]):
            # iterate for side effects (add to titles)
            for _ in recurse(tree):
                pass
        emf_rows = [[
            '#CODE', 'SED+EMF', 'EMF', 'SWMH', 'OTHER', 'SED', 'VANILLA'
        ]]
        col_width = [0, 8]
        for key in keys_to_add_emf:
            out_row = [key, prev_loc_emf[key], key, '', '', '', '']
            emf_rows.append(out_row)
            col_width[0] = max(len(key), col_width[0])
        for path in files('localisation/*.csv', [emfswmhpath],
                          basedir=emfpath):
            # NOTE(review): 6 cells here vs. the 7-column header above —
            # looks like a missing '' ; confirm intended.
            emf_rows.append(['#' + path.name, '', '', '', '', ''])
            for row in csv_rows(path):
                key, val = row[:2]
                if should_override(key) and key not in overridden_keys:
                    out_row = [
                        key, prev_loc_emf[key], val,
                        swmh_loc.get(key, ''), ','.join(dynamics[key]),
                        prev_loc[key],
                        vanilla.get(key, '')
                    ]
                    emf_rows.append(out_row)
                    overridden_keys.add(key)
                    col_width[0] = max(len(key), col_width[0])
            # NOTE(review): this padding pass runs once per input file,
            # re-padding all earlier rows (harmless since ljust composes,
            # but redundant; the analogous pass above sits outside the
            # file loop) — confirm the indentation is intended.
            for i, out_row in enumerate(emf_rows):
                if not out_row[0].startswith('#') or i == 0:
                    for col, width in enumerate(col_width):
                        out_row[col] = out_row[col].ljust(width)
        outpath = templates_t / inpath.relative_to(templates)
        with outpath.open('w', newline='', encoding='cp1252') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(emf_rows)

        # Retry removal in a loop: rmtree(ignore_errors=True) may fail to
        # fully delete (e.g. files transiently locked on Windows).
        while templates.exists():
            print('Removing old templates...')
            shutil.rmtree(str(templates), ignore_errors=True)
        shutil.copytree(str(templates_t), str(templates))
예제 #7
0
파일: build.py 프로젝트: escalonn/sed2
def main():
    """Build SED2 output from the templates: localisation CSVs, the
    EMF-compatibility localisation file, and edited landed_titles files,
    writing everything under sed2path/'build'.

    Relies on module-level settings not visible here (no_provinces,
    province_loc_files, version, the various mod paths).
    """
    simple_parser = SimpleParser()
    simple_parser.moddirs = [swmhpath]
    full_parser = FullParser()
    full_parser.newlines_to_depth = 0
    templates = sed2path / 'templates'
    templates_sed2 = templates / 'SED2'
    templates_loc = templates_sed2 / 'localisation'
    templates_lt = templates_sed2 / 'common/landed_titles'
    templates_emf_loc = templates / 'SED2+EMF/localisation'
    build = sed2path / 'build'
    build_sed2 = build / 'SED2'
    build_loc = build_sed2 / 'localisation'
    build_lt = build_sed2 / 'common/landed_titles'
    build_emf_loc = build / 'SED2+EMF/localisation'
    build_mini_lt = build / 'SED2+MiniSWMH/common/landed_titles'
    # build_emf_lt = build / 'SED2+EMF/common/landed_titles'
    # build_emfmini_lt = build / 'SED2+EMF+MiniSWMH/common/landed_titles'
    if build.exists():
        print('Removing old build...')
        shutil.rmtree(str(build))
    build_loc.mkdir(parents=True)
    build_lt.mkdir(parents=True)
    build_emf_loc.mkdir(parents=True)
    build_mini_lt.mkdir(parents=True)
    # build_emf_lt.mkdir(parents=True)
    # build_emfmini_lt.mkdir(parents=True)
    swmh_files = set()
    sed2 = {}  # loc key -> SED translation, read from the templates
    keys_to_blank = set()  # keys whose value is intentionally emptied

    province_id, province_title = get_province_id(simple_parser)

    for path in files('localisation/*', basedir=swmhpath):
        swmh_files.add(path.name)

    # Pass 1: read every template; collect SED values and keys to blank,
    # and directly emit template files that have no SWMH counterpart.
    for inpath in files('*', basedir=templates_loc):
        for row in csv_rows(inpath, comments=True):
            key, val = row[0].strip(), row[1].strip()
            if not val:
                if re.fullmatch(r' +', row[2]):
                    # An all-spaces SWMH column marks a deliberate blank.
                    val = ' '
                elif not row[2] or inpath.name not in swmh_files:
                    keys_to_blank.add(key)
            if not key.startswith('#'):
                if key not in sed2:
                    sed2[key] = val
                else:
                    print('Duplicate localisations for ' + key)
        if inpath.name not in swmh_files:
            outpath = build_loc / inpath.name
            # 15-column CKII loc row; last cell 'x' (end-of-row marker,
            # per the header rows built the same way below).
            sed2rows = [[''] * 15]
            sed2rows[0][:6] = [
                '#CODE', 'ENGLISH', 'FRENCH', 'GERMAN', '', 'SPANISH'
            ]
            sed2rows[0][-1] = 'x'
            for row in csv_rows(inpath):
                if no_provinces and re.match(r'[cb]_|PROV\d+', row[0]):
                    continue
                sed2row = [''] * 15
                sed2row[0] = row[0].strip()
                sed2row[1] = row[1].strip()
                sed2row[-1] = 'x'
                if sed2row[1] or sed2row[0] in keys_to_blank:
                    sed2rows.append(sed2row)
                elif not sed2row[1]:
                    # No translation: for '<title>_adj' keys, fall back to
                    # the base title's (or, for counties, the matching
                    # province's) SED localisation.
                    match = re.fullmatch(r'([ekdcb]_.*)_adj', sed2row[0])
                    if match:
                        title = match.group(1)
                        if title.startswith('c'):
                            if title in province_id:
                                the_id = province_id[title]
                                if the_id in sed2:
                                    sed2row[1] = sed2[the_id]
                                    sed2rows.append(sed2row)
                        else:
                            if title in sed2:
                                sed2row[1] = sed2[title]
                                sed2rows.append(sed2row)
            print('Writing {}'.format(outpath))
            with outpath.open('w', encoding='cp1252', newline='') as csvfile:
                csv.writer(csvfile, dialect='ckii').writerows(sed2rows)

    # EMF
    # determine files overriding SWMH locs
    overridden_files = swmh_files & {
        path.name
        for path in files('localisation/*', [emfswmhpath], basedir=emfpath)
    }
    inpath = templates_emf_loc / '0_SED+EMF.csv'
    original_file = None
    sed2rows = [[''] * 15]
    sed2rows[0][:6] = ['#CODE', 'ENGLISH', 'FRENCH', 'GERMAN', '', 'SPANISH']
    sed2rows[0][-1] = 'x'
    for row in csv_rows(inpath, comments=True):
        if row[0].startswith('#CODE'):
            continue
        if row[0].startswith('#'):
            # Comment rows in the template name the originating file.
            original_file = row[0][1:]
            continue
        if no_provinces and re.match(r'[cb]_|PROV\d+', row[0]):
            continue
        sed2row = [''] * 15
        sed2row[0] = row[0].strip()
        sed2row[1] = row[1].strip()
        sed2row[-1] = 'x'
        if sed2row[1] or sed2row[0] in keys_to_blank:
            sed2rows.append(sed2row)
        # NOTE: 'and' binds tighter than 'or', so this reads as
        # A or (B and C).
        elif (original_file in overridden_files
              or row[2] == row[3] and sed2.get(sed2row[0], row[2]) != row[2]):
            sed2row[1] = sed2.get(sed2row[0], '')
            sed2rows.append(sed2row)
    outpath = build_emf_loc / inpath.name
    print('Writing {}'.format(outpath))
    with outpath.open('w', encoding='cp1252', newline='') as csvfile:
        csv.writer(csvfile, dialect='ckii').writerows(sed2rows)

    # Pass 2: emit every SWMH loc file with SED values substituted in.
    for inpath in files('localisation/*', basedir=swmhpath):
        if no_provinces and inpath.name in province_loc_files:
            continue
        outpath = build_loc / inpath.name
        sed2rows = [[''] * 15]
        sed2rows[0][:6] = [
            '#CODE', 'ENGLISH', 'FRENCH', 'GERMAN', '', 'SPANISH'
        ]
        sed2rows[0][-1] = 'x'
        for row in csv_rows(inpath):
            sed2row = [''] * 15
            sed2row[0] = row[0]
            sed2row[1] = sed2.get(row[0], row[1])
            sed2row[-1] = 'x'
            if sed2row[1] or sed2row[0] in keys_to_blank:
                sed2rows.append(sed2row)
            elif not sed2row[1]:
                # Same '<title>_adj' fallback as in pass 1 above.
                match = re.fullmatch(r'([ekdcb]_.*)_adj', sed2row[0])
                if match:
                    title = match.group(1)
                    if title.startswith('c'):
                        if title in province_id:
                            the_id = province_id[title]
                            if the_id in sed2:
                                sed2row[1] = sed2[the_id]
                                sed2rows.append(sed2row)
                    else:
                        if title in sed2:
                            sed2row[1] = sed2[title]
                            sed2rows.append(sed2row)
        print('Writing {}'.format(outpath))
        with outpath.open('w', encoding='cp1252', newline='') as csvfile:
            csv.writer(csvfile, dialect='ckii').writerows(sed2rows)

    cultures = get_cultures(simple_parser, groups=False)
    lt_keys = [
        'title', 'title_female', 'foa', 'title_prefix', 'short_name',
        'name_tier', 'location_ruler_title', 'dynasty_title_names',
        'male_names'
    ] + cultures
    full_parser.fq_keys = cultures

    def update_tree(v, sed2, lt_keys):
        # Replace lt_keys pairs under each title codename with the SED
        # pairs from the template (strips culture-name pairs from
        # baronies); recurses into nested titles.
        for n2, v2 in v:
            if is_codename(n2.val):
                if n2.val.startswith('b_') and not no_provinces:
                    for p3 in reversed(v2.contents):
                        if p3.key.val in cultures:
                            v2.contents.remove(p3)
                elif not no_provinces or re.match(r'[ekd]_', n2.val):
                    for p3 in reversed(v2.contents):
                        if p3.key.val in lt_keys:
                            v2.contents.remove(p3)
                    if sed2[n2.val]:
                        # Insert before the first nested title codename.
                        index = next((i for i, (n3, _) in enumerate(v2)
                                      if is_codename(n3.val)), len(v2))
                        v2.contents[index:index] = sed2[n2.val]
                update_tree(v2, sed2, lt_keys)

    # sed2 is repurposed here: template path -> {title: [Pair, ...]}.
    sed2 = {}
    for inpath, tree in full_parser.parse_files('common/landed_titles/*',
                                                basedir=swmhpath):
        template = templates_lt / inpath.with_suffix('.csv').name
        outpath = build_lt / inpath.name
        sed2[template] = collections.defaultdict(list)
        prev_title = None
        seen_title_female = False
        title_female_to_set = None
        title_title_index = -1
        for row in csv_rows(template):
            title, key, val = (s.strip() for s in row[:3])
            # default title_female to title
            if prev_title != title:
                if title_female_to_set and not seen_title_female:
                    sed2[template][prev_title].insert(
                        title_title_index,
                        Pair('title_female', title_female_to_set))
                title_female_to_set = None
                seen_title_female = False
            if val:
                if key in ['male_names', 'female_names']:
                    # Split a quoted name list into individual strings.
                    val = Obj([
                        String(x.strip('"'))
                        for x in re.findall(r'[^"\s]+|"[^"]*"', val)
                    ])
                sed2[template][title].append(Pair(key, val))
                if key == 'title':
                    title_title_index = len(sed2[template][title])
                    title_female_to_set = val
            if key == 'title_female':
                seen_title_female = True
            prev_title = title
        # Flush the pending title_female default for the last title.
        if title_female_to_set and not seen_title_female:
            sed2[template][title].insert(
                title_title_index, Pair('title_female', title_female_to_set))
        update_tree(tree, sed2[template], lt_keys)
        print('Writing {}'.format(outpath))
        with outpath.open('w', encoding='cp1252', newline='\r\n') as f:
            f.write(tree.str(full_parser))

    # for moddir, builddir in zip([emfswmhpath, minipath, emfminipath],
    #     [build_emf_lt, build_mini_lt, build_emfmini_lt]):
    #     for inpath, tree in full_parser.parse_files('common/landed_titles/*',
    #                                                 basedir=moddir):
    #         if (inpath.name == 'emf_heresy_titles_SWMH.txt' and
    #             moddir == build_emf_lt):
    #             continue
    #             # lame hardcoded exception since we still don't have
    #             # templates for any non-SWMH landed_titles
    #         template = templates_lt / inpath.with_suffix('.csv').name
    #         if template in sed2:
    #             out = builddir / inpath.name
    #             update_tree(tree, sed2[template], lt_keys)
    #             with out.open('w', encoding='cp1252', newline='\r\n') as f:
    #                 f.write(tree.str(full_parser))

    for inpath, tree in full_parser.parse_files('common/landed_titles/*',
                                                basedir=minipath):
        template = templates_lt / inpath.with_suffix('.csv').name
        if template in sed2:
            outpath = build_mini_lt / inpath.name
            update_tree(tree, sed2[template], lt_keys)
            print('Writing {}'.format(outpath))
            with outpath.open('w', encoding='cp1252', newline='\r\n') as f:
                f.write(tree.str(full_parser))

    with (build_sed2 / 'version.txt').open('w',
                                           encoding='cp1252',
                                           newline='\r\n') as f:
        print('Writing {}'.format(build_sed2 / 'version.txt'))
        print('{} - {}'.format(version, datetime.date.today()), file=f)