Code example #1
def report(*tags, search=None, ignore=None, fmt=None, outfile=None, **kwargs):
    """
    Create a report for the given tags

    The tabular report shows how many characters have each unique value for each
    tag.

    Args:
        tags (list): Tag names to report on. Can contain strings and lists of
            strings.
        search (list): Paths to search for character files. Items can be strings
            or lists of strings.
        ignore (list): Paths to ignore
        fmt (str|None): Output format to use. Recognized values are defined in
            formatters.get_report_formatter. Pass "default" or None to get the
            format from settings.
        outfile (string|None): Filename to put the listed data. None and "-"
            print to stdout.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will contain the output file if given.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())

    if not search:
        search = ['.']
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('report'))
    if not fmt or fmt == 'default':
        fmt = prefs.get('report.default_format')

    # use a list so we can iterate more than once
    characters = list(parser.get_characters(flatten(search), ignore))

    # Construct a dict keyed by tag name whose values are Counters. Each Counter
    # is initialized with a flattened list of lists and we let it count the
    # duplicates.
    table_data = {
        tag: Counter(flatten([c.tags.get(tag, 'None') for c in characters]))
        for tag in flatten(tags)
    }

    formatter = formatters.get_report_formatter(fmt)
    if not formatter:
        return result.OptionError(
            errmsg="Cannot create output of format '{}'".format(fmt))
    with util.smart_open(outfile,
                         binary=(fmt in formatters.BINARY_TYPES)) as outstream:
        response = formatter(table_data, outstream=outstream, prefs=prefs)

    # pass errors straight through
    if not response.success:
        return response

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)
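
The dict comprehension above builds one Counter per tag, tallying every value that appears across all characters. A minimal standalone sketch of that pattern, using made-up character dicts and an illustrative flatten helper (not npc's own):

from collections import Counter

def flatten(items):
    """Illustrative helper: yield items from nested lists, leaving strings whole."""
    for item in items:
        if isinstance(item, (list, tuple)):
            yield from flatten(item)
        else:
            yield item

characters = [
    {'type': ['changeling'], 'court': ['Summer']},
    {'type': ['changeling'], 'court': ['Winter', 'Summer']},
    {'type': ['human']},
]

table_data = {
    tag: Counter(flatten([c.get(tag, 'None') for c in characters]))
    for tag in ['type', 'court']
}
# table_data['court'] == Counter({'Summer': 2, 'Winter': 1, 'None': 1})
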
Code example #2
def find(*rules, search=None, ignore=None, **kwargs):
    """
    Find characters in the campaign that match certain rules

    Searches for character objects in the campaign that match the given
    rules. To search an arbitrary list of Character objects, use
    find_characters.

    Args:
        rules (str): One or more strings that describe which characters to
            find. Passed directly to find_characters.
        search (list): Paths to search for character files. Items can be strings
            or lists of strings.
        ignore (list): Paths to ignore
        dryrun (bool): Whether to print the character file paths instead of
            opening them
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will contain a list of file paths to the
        matching Character objects.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    dryrun = kwargs.get('dryrun', False)
    if search is None:
        search = []

    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('find'))

    rules = list(flatten(rules))

    # use a list so we can iterate more than once
    characters = list(parser.get_characters(flatten(search), ignore))

    filtered_chars = find_characters(rules, characters=characters)

    paths = [char.get('path') for char in filtered_chars]

    if dryrun:
        openable = []
        printables = paths
    else:
        openable = paths
        printables = []

    return result.Success(openable=openable, printables=printables)
Code example #3
def reorg(*search, ignore=None, purge=False, verbose=False, commit=False, **kwargs):
    """
    Move character files into the correct paths.

    Character files are moved so that their path matches the ideal path as
    closely as possible. No new directories are created.

    This function ignores tags not found in Character.KNOWN_TAGS.

    Args:
        search (list): Paths to search for character files. Items can be strings
            or lists of strings.
        ignore (list): Paths to ignore
        purge (bool): Whether empty directories should be deleted after all
            files have been moved.
        verbose (bool): Whether to print changes as they are made
        commit (bool): Whether to actually move files around
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will be empty.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('reorg'))
    show_changes = verbose or not commit

    changelog = []

    base_path = prefs.get('paths.required.characters')
    if not path.exists(base_path):
        return result.FSError(errmsg="Cannot access '{}'".format(base_path))

    if show_changes:
        changelog.append("Move characters")
    for parsed_character in parser.get_characters(flatten(search), ignore):
        new_path = util.create_path_from_character(parsed_character, base_path=base_path)
        if path.normcase(path.normpath(new_path)) != path.normcase(path.normpath(path.dirname(parsed_character['path']))):
            if show_changes:
                changelog.append("* Move {} to {}".format(parsed_character['path'], new_path))
            if commit:
                try:
                    shmove(parsed_character['path'], new_path)
                except OSError:
                    if show_changes:
                        changelog.append("\t- dest path already exists; skipping")

    if purge:
        if show_changes:
            changelog.append("Purge empty directories")
        for empty_path in util.find_empty_dirs(base_path):
            if show_changes:
                changelog.append("* Remove empty directory {}".format(empty_path))
            if commit:
                rmdir(empty_path)

    return result.Success(printables=changelog)
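
The move decision above compares the ideal directory to the file's current directory after running both through normpath and normcase, so case and separator differences alone never trigger a move. That comparison in isolation (the paths are made up):

from os import path

def needs_move(current_file, ideal_dir):
    """True when the file's directory differs from the ideal one after normalization."""
    current_dir = path.dirname(current_file)
    return path.normcase(path.normpath(ideal_dir)) != path.normcase(path.normpath(current_dir))

needs_move('Characters/Humans/Alice.nwod', 'Characters/Humans')   # False
needs_move('Characters/Alice.nwod', 'Characters/Humans')          # True
needs_move('Characters/Humans/bob.nwod', 'characters/humans/')    # False on Windows, where normcase lowercases
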
Code example #4
def _get_mantle(data):
    std_mantle_re = re.compile(STANDARD_MANTLE_REGEX,
                               re.MULTILINE | re.IGNORECASE)
    alt_mantle_re = re.compile(ALT_MANTLE_REGEX, re.MULTILINE | re.IGNORECASE)

    std_matches = std_mantle_re.finditer(data)
    alt_matches = alt_mantle_re.finditer(data)
    return [m.group('court') for m in flatten([std_matches, alt_matches])]
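
Here flatten is fed a two-element list whose items are themselves iterators (the finditer results), so walking the flattened sequence visits both match streams in order. itertools.chain does the same job in this standalone sketch; the merit lines and regexes are simplified stand-ins, not the project's STANDARD_MANTLE_REGEX:

import re
from itertools import chain

SHEET = """Mantle (Summer) 3
Court Mantle: Winter 1
"""

std_re = re.compile(r'^mantle \((?P<court>\w+)\)', re.MULTILINE | re.IGNORECASE)
alt_re = re.compile(r'^court mantle: (?P<court>\w+)', re.MULTILINE | re.IGNORECASE)

courts = [m.group('court')
          for m in chain(std_re.finditer(SHEET), alt_re.finditer(SHEET))]
# courts == ['Summer', 'Winter']
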
Code example #5
def lint(*search, ignore=None, fix=False, strict=False, report=True, **kwargs):
    """
    Check character files for completeness and correctness.

    This function checks that every character file has a few required tags, and
    applies extra checking for some character types. See util.Character.validate
    for details.

    This command normally ignores unknown tags. In strict mode, it will report
    the presence of any tag not expected by the character class.

    Args:
        search (list): Paths to search for character files. Items can be strings
            or lists of strings.
        ignore (list): Paths to ignore
        fix (bool): Whether to automatically fix errors when possible
        strict (bool): Whether to include non-critical errors and omissions
        report (bool): Whether to only report problems. When True, files with
            errors are not added to the openable result.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. On success, openable attribute will contain a list of all
        files that had errors.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('lint'))

    openable = []
    printable = []

    # check each character
    characters = parser.get_characters(flatten(search), ignore)
    for character in characters:
        if character.tags('nolint').present:
            continue

        character.validate(strict)
        character.problems.extend(
            linters.lint(character, fix=fix, strict=strict, prefs=prefs))

        # Report problems on one line if possible, or as a block if there's more than one
        if not character.valid:
            charpath = character.path
            if not report:
                openable.append(charpath)
            if len(character.problems) > 1:
                printable.append("File '{}':".format(charpath))
                for detail in character.problems:
                    printable.append("    {}".format(detail))
            else:
                printable.append("{} in '{}'".format(character.problems[0],
                                                     charpath))

    return result.Success(openable=openable, printables=printable)
Code example #6
def _get_goodwill(data):
    std_goodwill_re = re.compile(STANDARD_GOODWILL_REGEX,
                                 re.MULTILINE | re.IGNORECASE)
    alt_goodwill_re = re.compile(ALT_GOODWILL_REGEX,
                                 re.MULTILINE | re.IGNORECASE)

    std_matches = std_goodwill_re.finditer(data)
    alt_matches = alt_goodwill_re.finditer(data)
    return [m.group('court') for m in flatten([std_matches, alt_matches])]
Code example #7
File: changeling.py Project: aurule/npc
    def check_kiths_have_blessings(self):
        blessings = set(self.prefs.get('changeling.blessings', {}).keys())
        kith_indexes = self.prefs.get('changeling.kiths', {})
        kiths = set(flatten(kith_indexes.values()))

        if not blessings.issuperset(kiths):
            with self.error_section(
                    'Kiths must all have a blessing. Kiths without a blessing:'
            ):
                self.add_errors(kiths.difference(blessings))
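
The check above reduces to plain set arithmetic: collect every kith referenced under changeling.kiths, then ask whether the blessing keys cover them all. With toy data (the kith and blessing names are invented):

blessings = {'hunterheart', 'windwing', 'fireheart'}
kith_indexes = {
    'beast': ['hunterheart', 'windwing'],
    'elemental': ['fireheart', 'waterborn'],
}

# set comprehension standing in for set(flatten(kith_indexes.values()))
kiths = {kith for group in kith_indexes.values() for kith in group}

if not blessings.issuperset(kiths):
    missing = kiths.difference(blessings)
    print("Kiths without a blessing:", sorted(missing))  # ['waterborn']
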
Code example #8
def dump(*search,
         ignore=None,
         do_sort=False,
         metadata=False,
         outfile=None,
         **kwargs):
    """
    Dump the raw character data, unaltered.

    Always formats the data as json.

    Args:
        search (List): Paths to search for character files
        ignore (List): Paths to ignore
        do_sort (bool): Whether to sort the characters before dumping
        metadata (bool): Whether to prepend metadata to the output
        outfile (string|None): Filename to put the dumped data. None and "-"
            print to stdout.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. If outfile pointed to a real file, the openable attribute
        will contain that filename.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('dump'))
    sort_by = kwargs.get('sort_by', prefs.get('dump.sort_by'))

    characters = parser.get_characters(flatten(search), ignore)
    if do_sort:
        sorter = util.character_sorter.CharacterSorter(sort_by, prefs=prefs)
        characters = sorter.sort(characters)

    characters = [c.dump() for c in characters]

    # make some json
    if metadata:
        meta = {'meta': True, **prefs.get_metadata('json')}
        characters = itertools.chain([meta], characters)

    with util.smart_open(outfile) as outstream:
        json.dump(list(characters), outstream, cls=CharacterEncoder)

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)
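
Prepending metadata is just a matter of chaining one extra dict ahead of the character records before serializing. A reduced sketch of that step (plain dicts stand in for Character objects, so the custom CharacterEncoder is not needed):

import itertools
import json
import sys

characters = [{'name': 'Alice', 'type': 'human'},
              {'name': 'Bob', 'type': 'changeling'}]
meta = {'meta': True, 'title': 'Campaign NPCs'}

records = itertools.chain([meta], characters)
json.dump(list(records), sys.stdout, indent=2)
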
Code example #9
File: listing.py Project: Arent128/npc
def make_list(*search,
              ignore=None,
              fmt=None,
              metadata=None,
              title=None,
              outfile=None,
              **kwargs):
    """
    Generate a listing of NPCs.

    The default listing templates ignore tags not found in Character.KNOWN_TAGS.

    Args:
        search (list): Paths to search for character files. Items can be strings
            or lists of strings.
        ignore (list): Paths to ignore
        fmt (str): Format of the output. Supported types are 'markdown', 'md',
            'htm', 'html', and 'json'. Pass 'default' or None to get format from
            settings.
        metadata (str|None): Whether to include metadata in the output and what
            kind of metadata to use. Pass 'default' to use the format configured
            in Settings.

            The markdown format allows either 'mmd' (MultiMarkdown) or
            'yfm'/'yaml' (Yaml Front Matter) metadata.

            The json format only allows one form of metadata, so pass any truthy
            value to include the metadata keys.
        title (str|None): The title to put in the metadata, if included.
            Overrides the title from settings.
        outfile (string|None): Filename to put the listed data. None and "-"
            print to stdout.
        do_sort (bool): Whether to sort the characters. Defaults to True.
        sort_by (string|None): Sort order for characters. Defaults to the value
            of "listing.sort_by" in settings.
        headings (List[string]): List of tag names to group characters by
        partial (bool): Whether to omit headers and footers and just render body
            content. Defaults to false.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.
        progress (function): Callback function to track the progress of
            generating a listing. Must accept the current count and total count.
            Should print to stderr. Not used by all formatters.

    Returns:
        Result object. Openable will contain the output file if given.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('listing'))
    do_sort = kwargs.get('do_sort', True)
    partial = kwargs.get('partial', False)
    update_progress = kwargs.get('progress', lambda i, t: False)

    sort_order = kwargs.get('sort_by',
                            prefs.get('listing.sort_by')) if do_sort else []
    headings = kwargs.get('headings', sort_order)

    characters = _process_directives(
        parser.get_characters(flatten(search), ignore))
    if do_sort:
        sorter = util.character_sorter.CharacterSorter(sort_order, prefs=prefs)
        characters = sorter.sort(characters)

    if fmt == "default" or not fmt:
        fmt = prefs.get('listing.default_format')
    out_type = formatters.get_canonical_format_name(fmt)

    formatter = formatters.get_listing_formatter(out_type)
    if not formatter:
        return result.OptionError(
            errmsg="Cannot create output of format '{}'".format(out_type))

    if metadata == 'default' and out_type != 'json':
        # Ensure 'default' metadata type gets replaced with the right default
        # metadata format. Irrelevant for json format.
        metadata_type = prefs.get(
            'listing.metadata.{}.default_format'.format(out_type))
    else:
        metadata_type = metadata

    meta = prefs.get_metadata(out_type)
    if title:
        meta['title'] = title

    header_offset = int(prefs.get('listing.base_header_level'))
    sectioners = [
        formatters.sectioners.get_sectioner(key=g,
                                            heading_level=i + header_offset,
                                            prefs=prefs)
        for i, g in enumerate(headings)
    ]

    with util.smart_open(outfile,
                         binary=(out_type
                                 in formatters.BINARY_TYPES)) as outstream:
        response = formatter(characters,
                             outstream,
                             metadata_format=metadata_type,
                             metadata=meta,
                             prefs=prefs,
                             sectioners=sectioners,
                             partial=partial,
                             update_progress=update_progress)

    # pass errors straight through
    if not response.success:
        return response

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)
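
Several commands here write through util.smart_open, which the docstrings describe as printing to stdout when outfile is None or "-" and opening a real file otherwise. Its actual implementation is not shown on this page; a plausible minimal sketch of a context manager with that behavior, assuming the binary flag simply switches the mode and stream, might look like:

import sys
from contextlib import contextmanager

@contextmanager
def smart_open(filename=None, binary=False):
    """Yield a writable stream: stdout for None or "-", an opened file otherwise."""
    if filename and filename != '-':
        stream = open(filename, 'wb' if binary else 'w')
        try:
            yield stream
        finally:
            stream.close()
    else:
        yield sys.stdout.buffer if binary else sys.stdout
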
Code example #10
File: test_npc_util.py Project: Arent128/npc
def test_flatten():
    nested = ['some text', 5, ['yet more']]
    flatted = ['some text', 5, 'yet more']
    assert [x for x in util.flatten(nested)] == flatted
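
This test pins down flatten's contract: nested lists are unrolled while strings and other scalars pass through untouched. util.flatten's real body is not reproduced on this page, but a generator satisfying the same test could look like:

from collections.abc import Iterable

def flatten(items):
    """Recursively yield elements of nested iterables, treating strings and bytes as scalars."""
    for item in items:
        if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
            yield from flatten(item)
        else:
            yield item

assert list(flatten(['some text', 5, ['yet more']])) == ['some text', 5, 'yet more']

Checking for Iterable rather than list is what lets the same helper accept iterators, such as the finditer results flattened in examples #4 and #6.
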
Code example #11
def lint(character, fix=False, *, strict=False, sk_data=None):
    """
    Verify the more complex elements in a changeling sheet.

    Checks for changeling-specific problems within the rules blocks of the
    character sheet. The problems it checks for are as follows:

    1. Seeming and kith appear in sk_data
    2. Entitlement tag appears at most one time
    3. Mantle merit matches court tag and appears at most one time
    4. Court Goodwill merit must not match mantle or court tag
    5. Seeming and kith appear in the sheet's body, not just the tags.
    6. Seeming and kith match the value of their corresponding tag.
    7. Seeming and kith have correct notes for their blessing (and curse for
        Seeming)

    Additional checks when strict is true:

    1. Virtue and Vice are present
    2. Mantle merit appears exactly once for the court in their tags
    3. The Unseen Sense merit must not be present

    Missing or incorrect notes can be fixed automatically if desired.

    Args:
        character (dict): Character data to lint
        fix (bool): Whether to automatically correct certain problems
        strict (bool): Whether to report non-critical errors and omissions
        sk_data (dict): Seeming and kith data, as from the
            settings/settings-changeling.json file.

    Returns:
        List of problem descriptions. If no problems were found, the list will
        be empty.
    """
    problems = []
    dirty = False

    # Check that seeming tag exists and is valid
    for seeming_name in character['seeming']:
        if seeming_name.lower() not in sk_data['seemings']:
            problems.append("Unrecognized @seeming '{}'".format(seeming_name))

    # Check that kith tag exists and is valid
    all_kiths = flatten(sk_data['kiths'].values())
    for kith_name in character['kith']:
        if kith_name.lower() not in all_kiths:
            problems.append("Unrecognized @kith '{}'".format(kith_name))

    # If the character has no sheet, we're done
    if not character.has_path:
        return problems

    # Load the sheet for deep linting
    with open(character['path'], 'r') as char_file:
        data = char_file.read()

    # STRICT: Check that they have a virtue and a vice
    if strict:
        problems.extend(nwod.lint_vice_virtue(data))

    # Check that the mantle matches the court if given
    court = character.get_first('court')
    court_key = court.lower() if court else 'none'
    mantle_courts = _get_mantle(data)
    mantle_court_keys = [c.lower() for c in mantle_courts]
    if mantle_courts:
        if len(mantle_courts) > 1:
            problems.append("Multiple mantle merits: {}".format(
                ', '.join(mantle_courts)))
        elif court_key != mantle_court_keys[0]:
            problems.append(
                "Court mantle '{}' does not match court tag '{}'".format(
                    mantle_courts[0], court))

    # Check that goodwill does not match the character's own court or mantle
    goodwill_courts = _get_goodwill(data)
    goodwill_court_keys = [c.lower() for c in goodwill_courts]
    if goodwill_courts:
        if court_key in goodwill_court_keys:
            problems.append(
                "Court goodwill listed for court tag '{}'".format(court))
        if mantle_courts and mantle_court_keys[0] in goodwill_court_keys:
            problems.append(
                "Court goodwill listed for court mantle '{}'".format(
                    mantle_courts[0]))

    # STRICT: Check that they have a mantle for their court
    if strict and court and court_key not in mantle_court_keys:
        problems.append("No mantle for court '{}'".format(court))

    # STRICT: Check that they do not have the Unseen Sense merit
    unseen_sense = _get_unseen_sense(data)
    if strict and unseen_sense:
        problems.append("Changelings cannot have the Unseen Sense merit")

    # Check that seeming tag matches listed seeming with correct notes
    seeming_tags = [t.lower() for t in character['seeming']]
    if seeming_tags:
        # ensure the listed seemings match our seeming tags
        seeming_re = re.compile(SEEMING_REGEX.format(r'\w+'),
                                re.MULTILINE | re.IGNORECASE)
        seeming_matches = list(seeming_re.finditer(data))
        seeming_stat_names = [
            m.group('seeming').lower() for m in seeming_matches
        ]
        if set(seeming_tags) != set(seeming_stat_names):
            problems.append("Seeming stats do not match @seeming tags")
            if (len(seeming_stat_names) == 1 and len(seeming_tags) == 1
                    and seeming_stat_names[0] in REPLACEABLE):
                if fix:
                    seeming_tag = seeming_tags[0]
                    try:
                        seeming_parts = {
                            'title': seeming_tag.title(),
                            'seeming': sk_data['blessings'][seeming_tag],
                            'kith': sk_data['curses'][seeming_tag]
                        }
                        seeming_line = "{title} ({seeming}; {kith})".format(
                            **seeming_parts)
                    except KeyError:
                        seeming_line = seeming_tag.title()

                    data = seeming_re.sub(r'\g<1>{}'.format(seeming_line),
                                          data)
                    problems[-1] += ' (placeholder; FIXED)'
                    dirty = True
                else:
                    problems[-1] += ' (placeholder; can fix)'
        else:
            # Tags and stats match. Iterate through each seeming and make sure
            # the notes are right.
            for match in list(seeming_matches):
                seeming_tag = match.group('seeming').lower()
                if seeming_tag not in sk_data['seemings']:
                    continue

                loaded_seeming_notes = match.group('notes')
                seeming_notes = "({}; {})".format(
                    sk_data['blessings'][seeming_tag],
                    sk_data['curses'][seeming_tag])
                if not loaded_seeming_notes:
                    problems.append("Missing notes for Seeming '{}'".format(
                        match.group('seeming')))
                    if fix:
                        data = _fix_seeming_notes(match.group('seeming'),
                                                  seeming_notes, data)
                        problems[-1] += ' (FIXED)'
                        dirty = True
                    else:
                        problems[-1] += ' (can fix)'
                else:
                    if loaded_seeming_notes != seeming_notes:
                        problems.append(
                            "Incorrect notes for Seeming '{}'".format(
                                match.group('seeming')))
                        if fix:
                            data = _fix_seeming_notes(match.group('seeming'),
                                                      seeming_notes, data)
                            problems[-1] += ' (FIXED)'
                            dirty = True
                        else:
                            problems[-1] += ' (can fix)'

    # Check that kith tag matches listed kith with correct notes
    kith_tags = [t.lower() for t in character['kith']]
    if kith_tags:
        # ensure the listed kiths match our kith tags
        kith_re = re.compile(KITH_REGEX.format(r'\w+( \w+)?'),
                             re.MULTILINE | re.IGNORECASE)
        kith_matches = list(kith_re.finditer(data))
        kith_stat_names = [m.group('kith').lower() for m in kith_matches]
        if set(kith_tags) != set(kith_stat_names):
            problems.append("Kith stats do not match @kith tags")
            if (len(kith_stat_names) == 1 and len(kith_tags) == 1
                    and kith_stat_names[0] in REPLACEABLE):
                if fix:
                    kith_tag = kith_tags[0]
                    try:
                        kith_line = "{} ({})".format(
                            kith_tag.title(), sk_data['blessings'][kith_tag])
                    except KeyError:
                        kith_line = kith_tag.title()

                    data = kith_re.sub(r'\g<1>{}'.format(kith_line), data)
                    problems[-1] += ' (placeholder; FIXED)'
                    dirty = True
                else:
                    problems[-1] += ' (placeholder; can fix)'
        else:
            # tags and stats match. iterate through each kith and make sure the
            # notes are right
            for match in list(kith_matches):
                kith_tag = match.group('kith').lower()
                if kith_tag not in sk_data['kiths'][seeming_tag]:
                    continue

                loaded_kith_notes = match.group('notes')
                kith_notes = "({})".format(sk_data['blessings'][kith_tag])
                if not loaded_kith_notes:
                    problems.append("Missing notes for Kith '{}'".format(
                        match.group('kith')))
                    if fix:
                        data = _fix_kith_notes(match.group('kith'), kith_notes,
                                               data)
                        problems[-1] += ' (FIXED)'
                        dirty = True
                    else:
                        problems[-1] += ' (can fix)'
                else:
                    if loaded_kith_notes != kith_notes:
                        problems.append("Incorrect notes for Kith '{}'".format(
                            match.group('kith')))
                        if fix:
                            data = _fix_kith_notes(match.group('kith'),
                                                   kith_notes, data)
                            problems[-1] += ' (FIXED)'
                            dirty = True
                        else:
                            problems[-1] += ' (can fix)'

    if dirty and data:
        with open(character['path'], 'w') as char_file:
            char_file.write(data)

    return problems
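
Most of the deep linting above follows one regex shape: a leading capture group for the line prefix, a named group for the stat, and a named group for its notes, so the same compiled pattern can both detect a wrong line and rewrite it with a group backreference. A toy version of that check-and-fix cycle, with a simplified pattern standing in for SEEMING_REGEX and invented blessing text:

import re

sheet = "    Seeming: Beast\n"
blessings = {'beast': 'primal urges'}
curses = {'beast': 'bestial instinct'}

seeming_re = re.compile(r'^(?P<prefix>\s*seeming:\s+)(?P<seeming>\w+)(?P<notes>.*)$',
                        re.MULTILINE | re.IGNORECASE)

match = seeming_re.search(sheet)
tag = match.group('seeming').lower()
wanted_notes = " ({}; {})".format(blessings[tag], curses[tag])

if match.group('notes') != wanted_notes:
    # rewrite the line, keeping the captured prefix and stat name
    sheet = seeming_re.sub(r'\g<prefix>\g<seeming>' + wanted_notes, sheet)
# sheet == "    Seeming: Beast (primal urges; bestial instinct)\n"
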
Code example #12
File: description_tag.py Project: aurule/npc
    def update(self, values):
        values = flatten([values])
        super().update(list(values))
Code example #13
def session(**kwargs):
    """
    Create the files for a new game session.

    Finds the plot and session log files for the last session, copies the plot,
    and creates a new empty session log. If the latest plot file is ahead of
    the latest session, a new plot file will *not* be created. Likewise if the
    latest session file is ahead, a new session file will *not* be created.

    Args:
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will contain the current and previous session
        log and plot planning files.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    plot_dir = Path(prefs.get('paths.required.plot'))
    session_dir = Path(prefs.get('paths.required.session'))

    if not plot_dir.exists():
        return result.FSError(
            errmsg="Cannot access plot path '{}'".format(plot_dir))

    if not session_dir.exists():
        return result.FSError(
            errmsg="Cannot access session path '{}'".format(session_dir))

    plot_template = prefs.get('story.templates.plot')
    if SEQUENCE_KEYWORD not in str(plot_template):
        return result.ConfigError(
            errmsg="Plot template has no number placeholder ({})".format(
                SEQUENCE_KEYWORD))
    plot_regex = regex_from_template(plot_template)
    latest_plot = latest_file(plot_dir, plot_regex)

    session_template = prefs.get('story.templates.session')
    if SEQUENCE_KEYWORD not in str(session_template):
        return result.ConfigError(
            errmsg="Session template has no number placeholder ({})".format(
                SEQUENCE_KEYWORD))
    session_regex = regex_from_template(session_template)
    latest_session = latest_file(session_dir, session_regex)

    new_number = min(latest_plot.number, latest_session.number) + 1

    def copy_templates(dest_dir, templates):
        """
        Create new story files from templates.

        This is responsible for creating the new file name based on
        `new_number`, loading the template contents, substituting the "NNN" and
        "((COPY))" keywords, and writing the result to the new file.
        """
        def old_file_contents(old_file_path):
            """
            Get the previous file's contents.


            """
            try:
                with open(old_file_path, 'r') as old_file:
                    return old_file.read()
            except (FileNotFoundError, IsADirectoryError):
                return ''

        for template_path in templates:
            if SEQUENCE_KEYWORD not in str(template_path):
                print_err("Template {} has no number placeholder ({})".format(
                    template_path, SEQUENCE_KEYWORD))
                continue

            new_file_name = template_path.name.replace(SEQUENCE_KEYWORD,
                                                       str(new_number))
            destination = dest_dir.joinpath(new_file_name)
            if destination.exists():
                continue

            with open(template_path, 'r') as f:
                data = f.read()

            data = data.replace(SEQUENCE_KEYWORD, str(new_number))
            if COPY_KEYWORD in data:
                file_regex = regex_from_template(template_path)
                old_file_path = latest_file(dest_dir, file_regex).path
                data = data.replace(COPY_KEYWORD,
                                    old_file_contents(old_file_path))

            with open(destination, 'w') as f:
                f.write(data)

    plot_templates = flatten([
        prefs.get('story.templates.plot'),
        prefs.get('story.templates.plot_extras')
    ])
    copy_templates(plot_dir, plot_templates)

    session_templates = flatten([
        prefs.get('story.templates.session'),
        prefs.get('story.templates.session_extras')
    ])
    copy_templates(session_dir, session_templates)

    openable = [
        str(latest_file(session_dir, session_regex).path),
        str(latest_file(plot_dir, plot_regex).path)
    ]
    old_session_name = session_template.name.replace(SEQUENCE_KEYWORD,
                                                     str(new_number - 1))
    old_session = session_dir.joinpath(old_session_name)
    if old_session.exists():
        openable.append(str(old_session))
    old_plot_name = plot_template.name.replace(SEQUENCE_KEYWORD,
                                               str(new_number - 1))
    old_plot = plot_dir.joinpath(old_plot_name)
    if old_plot.exists():
        openable.append(str(old_plot))

    return result.Success(openable=openable)
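
regex_from_template and latest_file are used throughout session but defined elsewhere. From the call sites and the copy_templates docstring, the template's SEQUENCE_KEYWORD ("NNN") becomes a number capture group and the file with the highest captured number wins. A rough sketch under those assumptions (the helpers' exact names are from the code above, but their bodies and return shape here are guesses, not the project's actual API):

import re
from collections import namedtuple
from pathlib import Path

SEQUENCE_KEYWORD = 'NNN'  # placeholder text, per the copy_templates docstring
LatestFile = namedtuple('LatestFile', ['path', 'number'])

def regex_from_template(template_path):
    """Turn a template name like 'Session NNN.md' into a pattern with a number group."""
    escaped = re.escape(Path(template_path).name)
    return re.compile(escaped.replace(re.escape(SEQUENCE_KEYWORD), r'(\d+)'))

def latest_file(directory, file_regex):
    """Return the matching file with the highest captured number, or a zero placeholder."""
    best = LatestFile(path=None, number=0)
    for candidate in Path(directory).iterdir():
        match = file_regex.match(candidate.name)
        if match and int(match.group(1)) > best.number:
            best = LatestFile(path=candidate, number=int(match.group(1)))
    return best
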