def report(*tags, search=None, ignore=None, fmt=None, outfile=None, **kwargs):
    """
    Create a report for the given tags

    The tabular report shows how many characters have each unique value for
    each tag.

    Args:
        tags (list): Tag names to report on. Can contain strings and lists of
            strings.
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore. Not modified by this function.
        fmt (str|None): Output format to use. Recognized values are defined in
            formatters.get_report_formatter. Pass "default" or None to get the
            format from settings.
        outfile (string|None): Filename to put the listed data. None and "-"
            print to stdout.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will contain the output file if given.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not search:
        search = ['.']
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('report'))
    if not fmt or fmt == 'default':
        fmt = prefs.get('report.default_format')

    # use a list so we can iterate more than once
    characters = list(parser.get_characters(flatten(search), ignore))

    # Construct a dict keyed by tag name whose values are Counters. Each
    # Counter is initialized with a flattened list of lists and we let it
    # count the duplicates.
    table_data = {
        tag: Counter(flatten([c.tags.get(tag, 'None') for c in characters]))
        for tag in flatten(tags)
    }

    formatter = formatters.get_report_formatter(fmt)
    if not formatter:
        return result.OptionError(
            errmsg="Cannot create output of format '{}'".format(fmt))

    with util.smart_open(outfile, binary=(fmt in formatters.BINARY_TYPES)) as outstream:
        response = formatter(table_data, outstream=outstream, prefs=prefs)

    # pass errors straight through
    if not response.success:
        return response

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)
def reorg(*search, ignore=None, purge=False, verbose=False, commit=False, **kwargs):
    """
    Move character files into the correct paths.

    Character files are moved so that their path matches the ideal path as
    closely as possible. No new directories are created.

    This function ignores tags not found in Character.KNOWN_TAGS.

    Args:
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore. Not modified by this function.
        purge (bool): Whether empty directories should be deleted after all
            files have been moved.
        verbose (bool): Whether to print changes as they are made
        commit (bool): Whether to actually move files around
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will be empty.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('reorg'))
    # When not committing, always report what would have changed
    show_changes = verbose or not commit

    changelog = []

    base_path = prefs.get('paths.required.characters')
    if not path.exists(base_path):
        return result.FSError(errmsg="Cannot access '{}'".format(base_path))

    if show_changes:
        changelog.append("Move characters")
    for parsed_character in parser.get_characters(flatten(search), ignore):
        new_path = util.create_path_from_character(parsed_character, base_path=base_path)
        # Normalize both sides so case and separator differences don't force a move
        if path.normcase(path.normpath(new_path)) != path.normcase(path.normpath(path.dirname(parsed_character['path']))):
            if show_changes:
                changelog.append("* Move {} to {}".format(parsed_character['path'], new_path))
            if commit:
                try:
                    shmove(parsed_character['path'], new_path)
                except OSError:
                    # NOTE(review): any OSError is reported as a pre-existing
                    # destination; other causes (permissions, etc.) get the
                    # same message — confirm this is the intended behavior
                    if show_changes:
                        changelog.append("\t- dest path already exists; skipping")

    if purge:
        if show_changes:
            changelog.append("Purge empty directories")
        for empty_path in util.find_empty_dirs(base_path):
            if show_changes:
                changelog.append("* Remove empty directory {}".format(empty_path))
            if commit:
                rmdir(empty_path)

    return result.Success(printables=changelog)
def lint(*search, ignore=None, fix=False, strict=False, report=True, **kwargs):
    """
    Check character files for completeness and correctness.

    This function checks that every character file has a few required tags,
    and applies extra checking for some character types. See
    util.Character.validate for details.

    This command normally ignores unknown tags. In strict mode, it will report
    the presence of any tag not expected by the character class.

    Args:
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore. Not modified by this function.
        fix (bool): Whether to automatically fix errors when possible
        strict (bool): Whether to include non-critical errors and omissions
        report (bool): Do not include files in the return data, only problem
            descriptions
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. On success, openable attribute will contain a list of
        all files that had errors.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('lint'))

    openable = []
    printable = []

    # check each character
    characters = parser.get_characters(flatten(search), ignore)
    for character in characters:
        # characters flagged 'nolint' opt out of validation entirely
        if character.tags('nolint').present:
            continue

        character.validate(strict)
        character.problems.extend(
            linters.lint(character, fix=fix, strict=strict, prefs=prefs))

        # Report problems on one line if possible, or as a block if there's
        # more than one
        if not character.valid:
            charpath = character.path
            if not report:
                openable.append(charpath)
            if len(character.problems) > 1:
                printable.append("File '{}':".format(charpath))
                for detail in character.problems:
                    printable.append("    {}".format(detail))
            else:
                printable.append("{} in '{}'".format(character.problems[0], charpath))

    return result.Success(openable=openable, printables=printable)
def find(*rules, search=None, ignore=None, **kwargs):
    """
    Find characters in the campaign that match certain rules

    Searches for character objects in the campaign that match the given rules.
    To search an arbitrary list of Character objects, use find_characters.

    Args:
        rules (str): One or more strings that describe which characters to
            find. Passed directly to find_characters.
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore. Not modified by this function.
        dryrun (bool): Whether to print the character file paths instead of
            opening them
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will contain a list of file paths to the
        matching Character objects.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    dryrun = kwargs.get('dryrun', False)

    if search is None:
        search = []
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('find'))

    rules = list(flatten(rules))

    # use a list so we can iterate more than once
    characters = list(parser.get_characters(flatten(search), ignore))
    filtered_chars = find_characters(rules, characters=characters)

    paths = [char.get('path') for char in filtered_chars]

    if dryrun:
        openable = []
        printables = paths
    else:
        openable = paths
        printables = []

    return result.Success(openable=openable, printables=printables)
def dump(*search, ignore=None, do_sort=False, metadata=False, outfile=None, **kwargs):
    """
    Dump the raw character data, unaltered.

    Always formats the data as json.

    Args:
        search (List): Paths to search for character files
        ignore (List): Paths to ignore. Not modified by this function.
        do_sort (bool): Whether to sort the characters before dumping
        metadata (bool): Whether to prepend metadata to the output
        outfile (string|None): Filename to put the dumped data. None and "-"
            print to stdout.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. If outfile pointed to a real file, the openable
        attribute will contain that filename.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('dump'))
    sort_by = kwargs.get('sort_by', prefs.get('dump.sort_by'))

    characters = parser.get_characters(flatten(search), ignore)
    if do_sort:
        sorter = util.character_sorter.CharacterSorter(sort_by, prefs=prefs)
        characters = sorter.sort(characters)

    characters = [c.dump() for c in characters]

    # make some json
    if metadata:
        meta = {'meta': True, **prefs.get_metadata('json')}
        characters = itertools.chain([meta], characters)

    with util.smart_open(outfile) as outstream:
        # materialize the chain before serializing; json.dump needs a sequence
        json.dump(list(characters), outstream, cls=CharacterEncoder)

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)
def make_list(*search, ignore=None, fmt=None, metadata=None, title=None, outfile=None, **kwargs):
    """
    Generate a listing of NPCs.

    The default listing templates ignore tags not found in
    Character.KNOWN_TAGS.

    Args:
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore. Not modified by this function.
        fmt (str): Format of the output. Supported types are 'markdown', 'md',
            'htm', 'html', and 'json'. Pass 'default' or None to get format
            from settings.
        metadata (str|None): Whether to include metadata in the output and
            what kind of metadata to use. Pass 'default' to use the format
            configured in Settings. The markdown format allows either 'mmd'
            (MultiMarkdown) or 'yfm'/'yaml' (Yaml Front Matter) metadata. The
            json format only allows one form of metadata, so pass any truthy
            value to include the metadata keys.
        title (str|None): The title to put in the metadata, if included.
            Overrides the title from settings.
        outfile (string|None): Filename to put the listed data. None and "-"
            print to stdout.
        do_sort (bool): Whether to avoid sorting altogether. Defaults to True.
        sort_by (string|None): Sort order for characters. Defaults to the
            value of "list_sort" in settings.
        headings (List[string]): List of tag names to group characters by
        partial (bool): Whether to omit headers and footers and just render
            body content. Defaults to false.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.
        progress (function): Callback function to track the progress of
            generating a listing. Must accept the current count and total
            count. Should print to stderr. Not used by all formatters.

    Returns:
        Result object. Openable will contain the output file if given.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    # Copy the caller's list so extending it below cannot mutate their argument
    ignore = list(ignore) if ignore else []
    ignore.extend(prefs.get_ignored_paths('listing'))
    do_sort = kwargs.get('do_sort', True)
    partial = kwargs.get('partial', False)
    update_progress = kwargs.get('progress', lambda i, t: False)

    sort_order = kwargs.get('sort_by', prefs.get('listing.sort_by')) if do_sort else []
    # group headings default to the sort order so sections match sorting
    headings = kwargs.get('headings', sort_order)

    characters = _process_directives(
        parser.get_characters(flatten(search), ignore))

    if do_sort:
        sorter = util.character_sorter.CharacterSorter(sort_order, prefs=prefs)
        characters = sorter.sort(characters)

    if fmt == "default" or not fmt:
        fmt = prefs.get('listing.default_format')
    out_type = formatters.get_canonical_format_name(fmt)

    formatter = formatters.get_listing_formatter(out_type)
    if not formatter:
        return result.OptionError(
            errmsg="Cannot create output of format '{}'".format(out_type))

    if metadata == 'default' and out_type != 'json':
        # Ensure 'default' metadata type gets replaced with the right default
        # metadata format. Irrelevant for json format.
        metadata_type = prefs.get(
            'listing.metadata.{}.default_format'.format(out_type))
    else:
        metadata_type = metadata

    meta = prefs.get_metadata(out_type)
    if title:
        meta['title'] = title

    header_offset = int(prefs.get('listing.base_header_level'))
    sectioners = [
        formatters.sectioners.get_sectioner(key=g, heading_level=i + header_offset, prefs=prefs)
        for i, g in enumerate(headings)
    ]

    with util.smart_open(outfile, binary=(out_type in formatters.BINARY_TYPES)) as outstream:
        response = formatter(
            characters,
            outstream,
            metadata_format=metadata_type,
            metadata=meta,
            prefs=prefs,
            sectioners=sectioners,
            partial=partial,
            update_progress=update_progress)

    # pass errors straight through
    if not response.success:
        return response

    openable = [outfile] if outfile and outfile != '-' else None

    return result.Success(openable=openable)