Example #1
    def from_markdown(cls, markdown_filepath):
        with open(markdown_filepath) as fh:
            metadata, _ = frontmatter.parse(fh.read())

        input_types = collections.OrderedDict()
        for input_ in metadata['inputs']:
            name, type_tuple = list(input_.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad input section formatting, need two items per key"
                )
            input_types[name] = cls._split_type_tuple(type_tuple, 'semantic')

        param_types = collections.OrderedDict()
        for parameter in metadata['parameters']:
            name, type_tuple = list(parameter.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad parameters section formatting, need two items per key"
                )
            param_types[name] = cls._split_type_tuple(type_tuple, 'primitive')

        output_types = collections.OrderedDict()
        for output in metadata['outputs']:
            name, type_tuple = list(output.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad outputs section formatting, need two items per key"
                )
            output_types[name] = cls._split_type_tuple(type_tuple, 'semantic')

        # TODO come up with a nice way to format default values in markdown
        return cls(input_types, param_types, {}, output_types)
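For context, a minimal sketch (not taken from the project) of the front matter layout the parser above appears to expect: each section is a list of single-key mappings whose value is a two-item tuple. The type names are placeholders borrowed from the test examples further down this page.

import frontmatter

# Hypothetical document; real input comes from markdown_filepath.
doc = '''\
---
inputs:
    - input1: [DummyType, list]
parameters:
    - param1: [Int, int]
outputs:
    - output1: [DummyType, list]
---
Template body goes here.
'''

metadata, template = frontmatter.parse(doc)
name, type_tuple = list(metadata['inputs'][0].items())[0]
assert (name, type_tuple) == ('input1', ['DummyType', 'list'])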
Example #2
    def _from_markdown(self, markdown_filepath, package, pid=None):
        signature = qtype.MethodSignature.from_markdown(markdown_filepath)

        with open(markdown_filepath) as fh:
            _, template = frontmatter.parse(fh.read())

        # TODO: verify that `id` is a valid Python identifier
        id = os.path.splitext(os.path.basename(markdown_filepath))[0]

        # TODO handle default values for optional parameters when that's
        # supported
        function_def_line = 'def %s(%s, %s):' % (
            id, ', '.join(signature.inputs), ', '.join(signature.parameters))
        indent = ' ' * 4
        function_body = ipymd.convert(template, from_='markdown', to='python')
        function_body = textwrap.indent(function_body, indent)
        function_return_line = '%sreturn %s' % (
            indent, ', '.join(signature.outputs))

        function_str = '\n'.join([function_def_line,
                                  function_body,
                                  function_return_line])

        scope = {}
        exec(function_str, scope)
        function = scope[id]

        self.__init__(package, id, signature, function,
                      ('markdown', markdown_filepath), pid=pid)
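To make the exec step concrete, a rough self-contained sketch of the kind of source string that gets assembled and executed; the function name and body are invented here, since the real body comes from the ipymd-converted template.

function_str = '\n'.join([
    'def concatenate_inputs(input1, input2, param1):',
    '    result = input1 + input2  # stands in for the template code',
    '    return result',
])
scope = {}
exec(function_str, scope)  # defines concatenate_inputs() inside scope
assert scope['concatenate_inputs']([1], [2], None) == [1, 2]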
Example #3
    def from_markdown(cls, markdown_filepath):
        with open(markdown_filepath) as fh:
            metadata, _ = frontmatter.parse(fh.read())

        input_types = collections.OrderedDict()
        for input_ in metadata['inputs']:
            name, type_tuple = list(input_.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad input section formatting, need two items per key"
                )
            input_types[name] = cls._split_type_tuple(type_tuple, 'semantic')

        param_types = collections.OrderedDict()
        for parameter in metadata['parameters']:
            name, type_tuple = list(parameter.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad parameters section formatting, need two items per key"
                )
            param_types[name] = cls._split_type_tuple(type_tuple, 'primitive')

        output_types = collections.OrderedDict()
        for output in metadata['outputs']:
            name, type_tuple = list(output.items())[0]
            if len(type_tuple) != 2:
                raise TypeError(
                    "Bad outputs section formatting, need two items per key"
                )
            output_types[name] = cls._split_type_tuple(type_tuple, 'semantic')

        return cls(input_types, param_types, output_types)
Example #4
    def test_has_title(self):
        "Parse frontmatter and only the frontmatter"
        with open('tests/article_with_id.md') as f:
            metadata, content = frontmatter.parse(f.read())

        self.assertTrue('title' in metadata,
                        "Article does not have a 'title' in the frontmatter")
Example #5
    def from_markdown(cls, markdown):
        """
           Parameters
           ----------
           markdown : filepath
        """
        id_ = os.path.splitext(os.path.split(markdown)[1])[0]
        # TODO: verify that `id_` is machine-friendly
        with open(markdown) as fh:
            metadata, template = frontmatter.parse(fh.read())

        type_imports = metadata['type-imports']
        input_types = {}
        for name, type_expr in metadata['inputs'].items():
            input_types[name] = cls._parse_type(type_imports, type_expr)

        output_types = collections.OrderedDict()
        for output in metadata['outputs']:
            # TODO validate each nested dict has exactly one item
            name, type_expr = list(output.items())[0]
            output_types[name] = cls._parse_type(type_imports, type_expr)

        name = metadata['name']
        signature = Signature(name, input_types, output_types)
        return cls(signature, template, id_)
Example #7
    def load(self, filepath: Path) -> Post:
        try:
            metadata, content = frontmatter.parse(
                filepath.read_text(encoding="utf-8"))
        except TomlDecodeError as e:
            raise PostDecodeError(f"Error in TOML Frontmatter: {e}")
        except ScannerError as e:
            raise PostDecodeError(f"Error in YAML Frontmatter: {e}")
        except JSONDecodeError as e:
            raise PostDecodeError(f"Error in JSON Frontmatter: {e}")

        if not metadata:
            raise PostDecodeError("Frontmatter not found.")
        if not content:
            raise PostDecodeError("Content not found.")

        post_publisher = metadata.get("post_publisher") or dict()

        return self._get_post(
            metadata,
            {
                "filepath": filepath,
                "post_publisher": PostPublisher(**post_publisher),
                "content": markdown(content),
            },
        )
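The except clauses above presumably rely on imports along these lines; python-frontmatter delegates to TOML, YAML, and JSON handlers, each of which surfaces its own decode error.

# A sketch of the imports this error handling assumes.
from json.decoder import JSONDecodeError
from toml.decoder import TomlDecodeError
from yaml.scanner import ScannerError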
Example #8
def load(pathlike, relative_to=""):
    """
    Loads a basic doc dictionary from a file path. This dictionary
    contains content string, and some basic information about the file.
    Typically, you decorate the doc later with meta and other fields.
    Create a doc dict, populating it with sensible defaults

    Returns a dictionary.
    """
    # TODO need to grab date from meta
    file_created, file_modified = read_file_times(pathlike)
    with open(str(pathlike)) as f:
        meta, content = frontmatter.parse(f.read())
        input_path = PurePath(pathlike)
        id_path = input_path.relative_to(relative_to)
        output_path = pathtools.to_nice_path(id_path)
        section = pathtools.tld(id_path)
        title = meta.get("title", pathtools.to_title(input_path))
        created = meta.get("created", file_created)
        modified = meta.get("modified", file_modified)

        return doc(id_path=id_path,
                   output_path=output_path,
                   input_path=input_path,
                   created=created,
                   modified=modified,
                   title=title,
                   section=section,
                   meta=meta,
                   content=content)
Example #10
def main(show_categories: bool = typer.Option(False,
                                              '--categories/--no-categories',
                                              help='show categories'),
         show_tags: bool = typer.Option(False,
                                        '--tags/--no-tags',
                                        help='show tags')):
    if not (show_categories or show_tags):
        raise typer.Exit('Did not choose to print categories or tags')

    categories = defaultdict(int)
    tags = defaultdict(int)

    for file in Path('.').glob('_posts/*.md'):
        metadata, _ = frontmatter.parse(file.read_text())
        post_categories = metadata.get('categories') or []

        if isinstance(post_categories, str):
            post_categories = [post_categories]

        for i in post_categories:
            categories[i] += 1

        post_tags = metadata.get('tags') or []
        for i in post_tags:
            tags[i] += 1

    if show_categories:
        print(
            tabulate.tabulate(Counter(categories).most_common(),
                              headers=['category', 'count']))

    if show_tags:
        print(
            tabulate.tabulate(Counter(tags).most_common(),
                              headers=['tag', 'count']))
Example #11
def build_indexer_lists():
    post_files = glob.glob(os.path.join(root_dir, "_posts/*.*"))

    for post_file in post_files:
        print('Parsing: %s' % post_file)
        with open(post_file, "r") as f:
            metadata, content = frontmatter.parse(f.read())

            draft = False

            if "draft" in metadata and metadata["draft"]:
                print('\t**Draft**')
                draft = True

            if not draft:
                tags = metadata["tags"] if "tags" in metadata else []
                categories = metadata[
                    "categories"] if "categories" in metadata else []

                if len(tags) == 0:
                    print('\tNo tags found')
                if len(categories) == 0:
                    print('\tNo categories found')

                for tag in tags:
                    indexer = PostIndexer(tag)
                    if indexer.slug not in tags_dict:
                        tags_dict[indexer.slug] = indexer
                for category in categories:
                    indexer = PostIndexer(category)
                    if indexer.slug not in categories_dict:
                        categories_dict[indexer.slug] = indexer
Example #12
 def _parse_page(self):
     with open(self.fname, "r") as f:
         metadata, content = frontmatter.parse(f.read())
         self.metadata = metadata
         self.content = content
         self.file_ext = os.path.splitext(self.fname)[1]
         if not "date" in metadata:
             self.metadata["date"] = datetime.now()
Example #13
    def process_file(self, path, file):
        metadata, contents = parse(file['contents'].read())

        if metadata.pop('path', None) or metadata.pop('collections', None):
            self.log_error('{}: path and collections are reserved keywords'.format(path))

        file.update(metadata)
        file['contents'] = io.BytesIO(bytes(contents, 'UTF-8'))
Example #14
def main():
    with open('build.yaml') as f:
        m, _ = frontmatter.parse(f.read())

        code = """
        pandoc default.yaml -i {} --bibliography=mwc_growth.bib --filter=pandoc-eqnos --filter=pandoc-crossref -o {}.pdf
        """.format(m['include'], m['name'])
        os.system(code)
Example #15
    def process_file(self, path, file):
        metadata, contents = parse(file["contents"].read())

        if metadata.pop("path", None) or metadata.pop("collections", None):
            self.log_error("{}: path and collections are reserved keywords".format(path))

        file.update(metadata)
        file["contents"] = io.BytesIO(bytes(contents, "UTF-8"))
Example #16
 def _list_types(text: TextIO,
                 filename: Optional[str]) -> Iterable[FileValue]:
     # log.debug(f"{text=} {filename=}")
     try:
         metadata, content = frontmatter.parse(text.read())
         yield FileValue(metadata["type"], filename)
     except (KeyError, yaml.parser.ParserError,
             json.decoder.JSONDecodeError):
         pass
Example #17
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import Popen
        EDITOR = os.environ.get('EDITOR', 'vim')
        # prefix = ""
        # if "permlink" in initial_content.metadata:
        #   prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
                suffix=b".md",
                prefix=b"steem-",
                delete=False
        ) as fp:
            # Write initial content
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            # Define parameters for command
            args = [EDITOR]
            if re.match("gvim", EDITOR):
                args.append("-f")
            args.append(fp.name)
            # Execute command
            Popen(args).wait()
            # Read content of file
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except Exception:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    # Extract anything that is not steem-libs meta and return it separately
    # for json_meta field
    json_meta = {key: meta[key] for key in meta if key not in [
        "title",
        "category",
        "author"
    ]}

    return meta, json_meta, body
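The json_meta split at the end is a plain dict comprehension; a small self-contained check of its behavior with invented sample data:

meta = {"title": "Hello", "author": "alice", "tags": ["a", "b"]}
json_meta = {key: meta[key] for key in meta if key not in [
    "title",
    "category",
    "author"
]}
assert json_meta == {"tags": ["a", "b"]}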
Example #18
def parse_frontmatter(files, metadata):
    for path, file in files:
        if file["is_binary"]:
            yield path, file
            continue
        file_metadata, contents = frontmatter.parse(file["contents"])
        file.update(file_metadata)
        file["contents"] = contents.strip()
        yield path, file
Example #19
def _file_metadata(file_abspath):
    with open(file_abspath) as input_fd:
        slug_name = os.path.basename(os.path.dirname(file_abspath))
        metadata, content = frontmatter.parse(input_fd.read())

        title = metadata['title']
        published_at = metadata['published_at']

        return (title, slug_name, published_at)
Example #20
def main():
    opts, _ = getopt.getopt(sys.argv[1:], 'b:')
    with open('headers/' + opts[0][1]) as f:
        m, _ = frontmatter.parse(f.read())

        code = """
        pandoc headers/{} -i {} --bibliography=./mscl_refs.bib  --filter=pandoc-eqnos --columns 6 --filter=pandoc-crossref -o {}
        """.format(m['header'], m['include'], m['name'])
        os.system(code)
Example #21
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import Popen
        EDITOR = os.environ.get('EDITOR', 'vim')
        # prefix = ""
        # if "permlink" in initial_content.metadata:
        #   prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
            suffix=b".md",
            prefix=b"piston-",
            delete=False
        ) as fp:
            # Write initial content
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            # Define parameters for command
            args = [EDITOR]
            if re.match("gvim", EDITOR):
                args.append("-f")
            args.append(fp.name)
            # Execute command
            Popen(args).wait()
            # Read content of file
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except Exception:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    # Extract anything that is not piston meta and return it separately
    # for json_meta field
    json_meta = {key: meta[key] for key in meta if key not in [
        "title",
        "category",
        "author"
    ]}

    return meta, json_meta, body
Example #22
 def build(self, build_info):
     for file_name, file_info in build_info.get_files_by_pattern(
             self._match_pattern):
         try:
             metadata, contents = frontmatter.parse(file_info.contents)
             file_info.metadata.update(metadata)
             file_info.contents = contents.encode()
         except Exception:
             logger.error(
                 "Error parsing frontmatter for {}".format(file_name))
Example #23
def get_posts(folder='./_posts'):
    result = {}
    for filepath in glob.glob(folder + "/*"):
        filename = filepath.split('/')[-1]
        # Default slug: strip the date prefix and the .md extension.
        slug = filename[11:-3]
        # frontmatter.parse expects the file's text, not its path, and it
        # returns a (metadata, content) tuple rather than a post object.
        with open(filepath) as f:
            metadata, content = frontmatter.parse(f.read())
        if "slug" in metadata:
            slug = metadata["slug"]
        result[slug] = content.replace('\n', ' ').replace('  ', ' ')
    return result
Example #24
def verify_features(folder, valid_features):
    success = True
    for filename in Path(folder).glob("*.md"):
        with open(filename, "rt") as f:
            metadata, content = frontmatter.parse(f.read())
        features = metadata.get('features') or ()
        for feature in sorted(set(features) - valid_features):
            print(f"{filename}:0: Non-standard feature: {feature}")
            success = False
    return success
Example #25
def parse_with_frontmatter(text):
    """ Parse, using the 'frontmatter' package. Reference function mostly.
    Note: The `frontmatter` package actually has a full "post" object class, which
    looks very similar to my `document` object.
    """
    import frontmatter
    from frontmatter import YAMLHandler
    # Unfortunately, frontmatter.parse doesn't have any way to determine if splitting gives an error (only YAML load).
    metadata, content = frontmatter.parse(text, handler=YAMLHandler)
    return metadata, content
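As the docstring notes, frontmatter.parse does not signal a failed split; when no front matter block is found it simply returns empty metadata, which the caller can check explicitly:

import frontmatter

# With no front matter present, parse returns ({}, original_text).
metadata, content = frontmatter.parse("just body text, no front matter")
assert metadata == {}
assert content == "just body text, no front matter"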
Example #26
def markdown_preview(request):
    if request.method == 'POST':
        assert 'text' in request.POST

        text = request.POST['text']
        metadata, content = frontmatter.parse(text)

        return render(request, 'playground/markdown_preview.html', {
            'content': content,
            'metadata': metadata
        })
Example #27
def verify_contribute_not_present(folder):
    valid = True
    contribute = re.compile(r".*\n## Contribute", re.MULTILINE | re.DOTALL)
    for filename in Path(folder).glob("*.md"):
        with open(filename, "rt") as f:
            metadata, content = frontmatter.parse(f.read())
        board_id = metadata.get('board_id') or ()
        result = contribute.match(content)
        if result is not None:
            print(f"Contribute Section found for {board_id} in {folder}")
            valid = False
    return valid
Example #28
def get_order_card(cardfile, default_sorted):
    pos_base = int32_max - len(default_sorted)
    with open(cardfile, 'r') as f:
        metadata, content = frontmatter.parse(f.read())
        if metadata and 'order' in metadata:
            order = metadata['order']
        elif metadata and 'Order' in metadata:
            order = metadata['Order']
        else:
            order = pos_base + default_sorted.index(cardfile)
    return order
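A quick check of the fallback arithmetic above: cards without an explicit 'order' key land after all explicitly ordered cards while preserving their default relative order. The values here are invented for illustration.

int32_max = 2 ** 31 - 1
default_sorted = ['a.md', 'b.md', 'c.md']
pos_base = int32_max - len(default_sorted)

# 'b.md' carries no 'order' key, so it gets pos_base + its default position.
assert pos_base + default_sorted.index('b.md') == int32_max - 2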
Example #29
    def _test_one_file(self, data_file, **kwargs):
        """Tests the conversion of the data in a testdata file.

        Args:
            data_file: str.  Basename of the file to be loaded.
            **kwargs: dict.  Keyword arguments to pass to convert().
        """
        markdown, gplus = self._load_data_file(data_file)
        metadata, content = frontmatter.parse(markdown)
        gplus_actual = converter.convert(metadata, content, **kwargs)
        self.assertListEqual(gplus.split("\n"), gplus_actual.split("\n"))
Example #30
    def parse_md_metadata(self, md_string):
        self.logger.debug("Parsing markdown front matter metadata")
        metadata, content = frontmatter.parse(md_string)
        if self._metadata_schema == ['']:
            self._metadata = metadata
            return content

        self._metadata = {
            key: value
            for key, value in metadata.items() if key in self._metadata_schema
        }
        return content
Example #31
 def _load(self):
     with open(self.path) as f:
         metadata, content = frontmatter.parse(f.read())
     self.meta = metadata
     body = content
     self.parts = _split(body)
     self.hrefs = []
     self._hashtags = set()
     for parsable, part in self.parts:
         if parsable:
             self.hrefs.extend(_extract_hrefs(part))
             self._hashtags.update(_extract_hashtags(part))
Example #32
    def register_markdown(self, markdown_filepath):
        markdown_filepath = pkg_resources.resource_filename(
            self._plugin.package, markdown_filepath)

        callable = MethodCallable.from_markdown(markdown_filepath,
                                                self._package)

        with open(markdown_filepath) as fh:
            metadata, source = frontmatter.parse(fh.read())

        self._register_callable(callable, metadata['name'],
                                metadata['description'], source)
Example #33
 def from_markdown(cls, markdown):
     """
        Parameters
        ----------
        markdown : filepath
     """
     with open(markdown) as fh:
         metadata, workflow_template = frontmatter.parse(fh.read())
     inputs = metadata['inputs']
     outputs = metadata['outputs'].items()
     name = metadata['name']
     return cls(inputs, outputs, workflow_template, name)
Example #35
def parse_frontmatter(doc):
    """
    Parse frontmatter as YAML. Set frontmatter on meta field, and
    remaining content on content field.

    If there is no frontmatter, will set an empty object on meta field,
    and leave content as-is.
    """
    meta, content = frontmatter.parse(doc.content)
    return doc._replace(
        meta=meta,
        content=content
    )
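The doc._replace call implies doc is a namedtuple (or a similar immutable record). A minimal sketch with a reduced, hypothetical doc type:

from collections import namedtuple

import frontmatter

Doc = namedtuple("Doc", ["meta", "content"])  # the real type carries more fields

def parse_frontmatter(doc):
    meta, content = frontmatter.parse(doc.content)
    return doc._replace(meta=meta, content=content)

parsed = parse_frontmatter(Doc(meta={}, content="---\ntitle: Hi\n---\nBody"))
assert parsed.meta == {"title": "Hi"}
assert parsed.content == "Body"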
Example #36
def get_metas(blog):
    all_metas = []
    files = [
        blog + f for f in os.listdir(blog)
        if f not in ["_index.md", "feed.xml"]
    ]
    for f in files:
        with open(f) as fx:
            meta, _ = fm.parse(fx.read())
            all_metas.append(meta)

    all_metas.sort(key=lambda x: x['date'], reverse=True)
    return all_metas
Example #37
    def __init__(self, file_path: Text) -> None:
        self.file_path = file_path

        try:
            with open(file_path, encoding="utf-8") as f:
                self.file_content = f.read(MAX_FILE_SIZE)

                if f.read(1):
                    raise DevCliError(f'File "{file_path}" is too big')
        except IOError as e:
            raise DevCliError(f'Could not open "{file_path}": {e}')

        self.front_matter, self.markdown = frontmatter.parse(self.file_content)
Example #38
    def get_metadata_content_tuple(self):
        """Tries to load the frontmatter data and content for the current action type for the operation
        """
        view_docs = formatting.dedent(self.view.__doc__)

        docs = list(map(lambda x: f'---{x}', view_docs.split('===')[1:]))
        method = self.method
        action = getattr(self.view, 'action_map', {}).get(method.lower(), None) if self.view.action_map else None
        for doc_bloc in docs:
            metadata, content = frontmatter.parse(doc_bloc)
            if not metadata:
                continue
            action_to_map = metadata.get('action', 'default')
            if action_to_map == action or (isinstance(action_to_map, list) and action in action_to_map):
                return metadata, content
        if action and hasattr(self.view, action):
            action_func = getattr(self.view, action, None)
            action_docs = formatting.dedent(action_func.__doc__)
            if '===' in action_docs:
                metadata, content = frontmatter.parse(action_docs.replace('===', '---'))
                return metadata, content
            return None, action_docs
        return None, view_docs
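For illustration, a hypothetical docstring in the '===' convention this method expects; prefixing '---' to each chunk after the split turns it into a block frontmatter can parse.

import frontmatter

view_docs = """Overview of the endpoint.
===
action: list
---
Documentation shown for the list action.
"""

doc_bloc = ['---' + chunk for chunk in view_docs.split('===')[1:]][0]
metadata, content = frontmatter.parse(doc_bloc)
assert metadata == {'action': 'list'}
assert content == 'Documentation shown for the list action.'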
Example #39
def main(sources: str, output_path: str):
    namespaces = defaultdict(Namespace)
    output_path = Path(output_path)
    for source in glob.glob(sources):
        print(source)
        with open(source, 'r', encoding='utf8') as f:
            header, _ = frontmatter.parse(f.read())
        if header['qsharp.kind'] == 'namespace':
            namespaces[
                header['qsharp.name']].summary = header['qsharp.summary']
            namespaces[header['qsharp.name']].name = header['qsharp.name']
            namespaces[header['qsharp.name']].uid = header['uid']
        else:
            namespaces[header['qsharp.namespace']].items.add(
                NamespaceItem(summary=header['qsharp.summary'],
                              name=header['qsharp.name'],
                              namespace=header['qsharp.namespace'],
                              uid=header["uid"],
                              kind=header["qsharp.kind"]))

    for namespace_name, namespace in namespaces.items():
        uid = namespace.uid or namespace_name
        name = namespace.name or namespace_name
        namespace_page = {
            "uid": uid,
            "name": name,
            "summary": namespace.summary,
            "operations": items_of_kind(namespace.items, "operation"),
            "functions": items_of_kind(namespace.items, "function"),
            "newtypes": items_of_kind(namespace.items, "udt")
        }

        with open(output_path / f"{name.lower()}.yml", "w",
                  encoding="utf8") as f:
            f.write(namespace_comment + warning_comment +
                    yaml.dump(namespace_page))

    toc_page = [{
        "uid": namespace.name,
        "name": namespace_name,
        "items": [{
            "name": item.name,
            "uid": item.uid
        } for item in sorted(namespace.items, key=lambda item: item.uid)]
    } for namespace_name, namespace in sorted(namespaces.items(),
                                              key=lambda pair: pair[0])]
    with open(output_path / "toc.yml", "w", encoding="utf8") as f:
        f.write(warning_comment + yaml.dump(toc_page))
Example #40
    def _from_markdown(self, markdown_filepath, plugin_name):
        with open(markdown_filepath) as fh:
            metadata, template = frontmatter.parse(fh.read())

        input_types = collections.OrderedDict()
        for input_ in metadata['inputs']:
            # TODO validate each nested dict has exactly two items
            name, type_tuple = list(input_.items())[0]
            input_types[name] = self._split_type_tuple(type_tuple, 'semantic')

        param_types = collections.OrderedDict()
        for parameter in metadata['parameters']:
            # TODO validate each nested dict has exactly two items
            name, type_tuple = list(parameter.items())[0]
            param_types[name] = self._split_type_tuple(type_tuple, 'primitive')

        output_types = collections.OrderedDict()
        for output in metadata['outputs']:
            # TODO validate each nested dict has exactly two items
            name, type_tuple = list(output.items())[0]
            output_types[name] = self._split_type_tuple(type_tuple, 'semantic')

        signature = qtype.Signature(input_types, param_types, output_types)

        # TODO: verify that `id_` is a valid Python identifier
        id_ = os.path.splitext(os.path.basename(markdown_filepath))[0]

        # TODO handle default values for optional parameters when that's
        # supported
        function_def_line = 'def %s(%s, %s):' % (id_,
                                                 ', '.join(input_types),
                                                 ', '.join(param_types))
        indent = ' ' * 4
        function_body = ipymd.convert(template, from_='markdown', to='python')
        function_body = textwrap.indent(function_body, indent)
        function_return_line = '%sreturn %s' % (indent,
                                                ', '.join(output_types))

        function_str = '\n'.join([function_def_line,
                                  function_body,
                                  function_return_line])

        scope = {}
        exec(function_str, scope)
        function = scope[id_]

        name = metadata['name']
        description = metadata['description']

        self._init(id_, signature, function, ('markdown', markdown_filepath),
                   name, description, template, plugin_name)
Example #41
    def get_source(
            self, environment: Environment,
            template: str) -> Tuple[str, str, Optional[Callable[[], bool]]]:
        """Superclass override (https://jinja.palletsprojects.com/en/3.0.x/api/#loaders)

        When told to find a template with name `foo.html.jinja2`, will attempt to find a template
        with name `foo.md` and wrangle it into Jinja format.

        Raises:
            TemplateNotFound: if the template is outside the prefix allowlist
                or no matching .md file exists.

        Returns:
            Tuple[str,str,Optional[Callable[[],bool]]]: (source, filename, is_uptodate);
                `source` is the Jinja template source,
                `filename` is the path to the file that Jinja can use for stack
                traces,
                `is_uptodate` (if provided) is used for template reloading; if
                it returns `False` then the template is reloaded.
        """

        template = template.replace("/", os.sep)
        # `template` (as given in arguments) is a Jinja path (/ on all paths)
        # from hereon we can assume it is an OS-compatible path.

        if self.prefix_allowlist is None or not template.startswith(
                self.prefix_allowlist):
            raise TemplateNotFound(template)

        filename = os.path.join(self.searchpath,
                                removesuffix(template, ".html.jinja2") + ".md")
        if os.path.exists(filename):
            with open(filename, encoding='utf-8') as fd:
                metadata, content = frontmatter.parse(fd.read())
                # NB: readlines() returns a list of lines WITH \n at the end

                title = metadata["title"]

            source = ("""
            {% extends "article.html.jinja2" %}
            {% block title %}""" + title + """{% endblock title %}
            {% set parts | split_lede %}{% filter markdown() %}{% raw -%}""" +
                      content + """{% endraw %}{% endfilter %}{% endset %}
            {% block lede %}{{ parts.lede }}{% endblock lede %}
            {% block text %}{{ parts.text }}{% endblock text %}
            """)

            return (source, filename, None)
            # TODO: add 3rd tuple argument for autoreloading
        else:
            raise TemplateNotFound(template)
Example #42
    def _from_markdown(self, markdown_filepath):
        with open(markdown_filepath) as fh:
            metadata, template = frontmatter.parse(fh.read())

        input_types = collections.OrderedDict()
        for input_ in metadata["inputs"]:
            # TODO validate each nested dict has exactly two items
            name, type_expr = list(input_.items())[0]
            input_types[name] = self._parse_semantic_type(type_expr)

        param_types = collections.OrderedDict()
        for parameter in metadata["parameters"]:
            # TODO validate each nested dict has exactly two items
            name, type_expr = list(parameter.items())[0]
            param_types[name] = self._parse_primitive_type(type_expr)

        output_types = collections.OrderedDict()
        for output in metadata["outputs"]:
            # TODO validate each nested dict has exactly two items
            name, type_expr = list(output.items())[0]
            output_types[name] = self._parse_semantic_type(type_expr)

        signature = qiime.sdk.Signature(input_types, param_types, output_types)

        # TODO: verify that `id_` is a valid Python identifier
        id_ = os.path.splitext(os.path.basename(markdown_filepath))[0]

        # TODO handle default values for optional parameters when that's
        # supported
        function_def_line = "def %s(%s, %s):" % (id_, ", ".join(input_types), ", ".join(param_types))
        indent = " " * 4
        function_body = ipymd.convert(template, from_="markdown", to="python")
        function_body = textwrap.indent(function_body, indent)
        function_return_line = "%sreturn %s" % (indent, ", ".join(output_types))

        function_str = "\n".join([function_def_line, function_body, function_return_line])

        scope = {}
        exec(function_str, scope)
        function = scope[id_]

        name = metadata["name"]
        description = metadata["description"]

        self._init(id_, signature, function, ("markdown", markdown_filepath), name, description, template)
Example #43
    def test_register_function_and_workflow(self):
        self.assertEqual(self.plugin.workflows, {})

        self.plugin.register_function(
            name='Dummy function',
            function=dummy_function,
            inputs={},
            parameters={},
            outputs=[('answer', TestType)],
            doc='Computes the answer to life, the universe, and everything'
        )

        with unittest.mock.patch.object(pkg_resources, 'resource_filename',
                                        return_value=self.markdown_fp):
            self.plugin.register_workflow(self.markdown_fp)

        workflows = {
            'dummy_function':
                Workflow(
                    signature=Signature(
                        name='Dummy function',
                        inputs={},
                        parameters={},
                        outputs=collections.OrderedDict([('answer',
                                                         (TestType, list))])),
                    template=expected_dummy_function_template,
                    id_='dummy_function'
                ),
            'dummy_markdown_workflow':
                Workflow(
                    signature=Signature(
                        name='Dummy markdown workflow',
                        inputs={},
                        parameters={'param1': (Int, int),
                                    'param2': (Int, int)},
                        outputs=collections.OrderedDict([('the_sum',
                                                         (TestType, list))])),
                    template=frontmatter.parse(markdown_template)[1],
                    id_='dummy_markdown_workflow'
                )
        }

        self.assertEqual(self.plugin.workflows, workflows)
Example #44
    def test_register_workflow(self):
        self.assertEqual(self.plugin.workflows, {})

        with unittest.mock.patch.object(pkg_resources, 'resource_filename',
                                        return_value=self.markdown_fp):
            self.plugin.register_workflow(self.markdown_fp)

        workflows = {
            'dummy_markdown_workflow':
                Workflow(
                    signature=Signature(
                        name='Dummy markdown workflow',
                        inputs={'param1': Int, 'param2': Int},
                        outputs=collections.OrderedDict([('the_sum', Int)])),
                    template=frontmatter.parse(markdown_template)[1],
                    id_='dummy_markdown_workflow'
                )
        }

        self.assertEqual(self.plugin.workflows, workflows)
Example #45
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import call
        EDITOR = os.environ.get('EDITOR', 'vim')
        prefix = ""
        if "permlink" in initial_content.metadata:
            prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
            suffix=b".md",
            prefix=bytes("piston-" + prefix, 'ascii'),
            delete=False
        ) as fp:
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            call([EDITOR, fp.name])
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except Exception:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    return meta, body
Example #46
    def test_from_markdown(self):
        workflow = Workflow.from_markdown(self.markdown_fp)

        expected = Workflow(
            signature=Signature(
                name='Dummy markdown workflow',
                inputs={
                    'input1': DummyType,
                    'input2': DummyType,
                    'param1': Int,
                    'param2': Int,
                },
                outputs=collections.OrderedDict([
                    ('concatenated_inputs', DummyType)
                ])
            ),
            template=frontmatter.parse(markdown_template)[1],
            id_='dummy_markdown_workflow'
        )

        self.assertEqual(workflow, expected)
Example #47
    def test_from_markdown(self):
        workflow = Workflow.from_markdown(self.markdown_fp)

        expected = Workflow(
            signature=Signature(
                name='Dummy markdown workflow',
                inputs={
                    'input1': (qiime.core.testing.TestType, list),
                    'input2': (qiime.core.testing.TestType, list),
                },
                parameters={
                    'param1': (qiime.plugin.Int, int),
                    'param2': (qiime.plugin.Int, int),
                },
                outputs=collections.OrderedDict([
                    ('concatenated_inputs',
                     (qiime.core.testing.TestType, list))
                ])
            ),
            template=frontmatter.parse(markdown_template)[1],
            id_='dummy_markdown_workflow'
        )

        self.assertEqual(workflow, expected)
Example #48
    def test_template(self):
        workflow = Workflow.from_markdown(self.markdown_fp)

        self.assertEqual(workflow.template,
                         frontmatter.parse(markdown_template)[1])