Example 1
from pwkit.io import Path

def read_markdown(path):
    """Read a Markdown file.

    Returns `(front_matter, content)`, where `front_matter` is the unparsed TOML
    text at the beginning of the file, and `content` is the main Markdown
    content. The "+++" delimiters do not appear in either.

    """
    path = Path(path)

    with path.open('rt') as f:
        line = f.readline()
        assert line == '+++\n'

        front_matter = []

        while True:
            line = f.readline()
            if not line:
                # Guard against files whose front matter is never closed;
                # otherwise this loop would never terminate at EOF.
                raise ValueError('unterminated front matter in %s' % path)
            if line == '+++\n':
                break

            front_matter.append(line)

        front_matter = ''.join(front_matter)
        content = f.read()

    return front_matter, content
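A file that this function accepts begins with unparsed TOML between a pair of "+++" delimiter lines; a minimal sketch (the TOML keys here are hypothetical):

+++
title = "An example post"
date = 2018-06-01
+++

The Markdown body of the post follows the closing delimiter.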
Example 2
from pwkit.io import Path

def rewrite_markdown(path, front_matter, content):
    "Rewrite a Markdown file with TOML front matter."

    path = Path(path)

    # Write to a temporary file that replaces the original on a clean exit.
    with path.make_tempfile(resolution='overwrite', mode='wt') as f:
        print('+++', file=f)
        print(front_matter, file=f)
        print('+++', file=f)
        print(content, end='', file=f)
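Together with read_markdown above, this enables a simple edit-in-place round trip; a minimal sketch (the post path and the TOML tweak are hypothetical):

front_matter, content = read_markdown('content/2018/some-post.md')
front_matter += 'draft = false\n'  # append a hypothetical TOML line
rewrite_markdown('content/2018/some-post.md', front_matter, content)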
Example 3
File: kzn.py Project: pkgw/vernon
    def to_model(self):
        p = Path(self.path)
        if p != p.absolute():
            raise Exception('the "path" item of the kzn-model configuration '
                            'must be an absolute path')

        return KZNModel(
            self.path,
            self.soln_number,
        )
Example 4
File: hpc.py Project: pkgw/vernon
import os
import subprocess

from pwkit.cli import die
from pwkit.io import Path

def prep_and_image_ui(pre_args, settings, config):
    if not Path('Config.toml').exists():
        die('expected "Config.toml" in current directory')

    os.mkdir('preprays')
    os.mkdir('integrate')

    # Seed the task list for the "preprays" stage, capturing the seeder's stdout.
    with open('preprays/tasks', 'wb') as tasks:
        subprocess.check_call(
            ['vernon', 'preprays', 'seed',
             '-c', 'Config.toml',
             '-g', str(config.preprays_n_col_groups)
            ],
            stdout=tasks,
        )

    masterid = config.launch_ljob('preprays', 'preprays')
    nextid = config.schedule_next_stage(
        'pandi_pr_assemble',
        pre_args + ['--stage=pr_assemble', '--previd=%s' % masterid],
        masterid,
    )

    print('Preprays ljob master ID:', masterid)
    print('Next-stage job ID:', nextid)

    with open('pandi_launch.log', 'wt') as log:
        print('Preprays ljob master ID:', masterid, file=log)
        print('Next-stage job ID:', nextid, file=log)
Example 5
    @classmethod
    def from_serialized(cls, config_path, result_to_extract=None):
        """`result_to_extract` is a total lazy hack for the training tool."""
        import pytoml

        with Path(config_path).open('rt') as f:
            info = pytoml.load(f)

        inst = cls()
        inst.pmaps = []
        inst.rmaps = []
        extracted_info = None

        for subinfo in info['params']:
            inst.pmaps.append(mapping_from_dict(subinfo))

        for i, subinfo in enumerate(info['results']):
            if (result_to_extract is not None
                    and subinfo['name'] == result_to_extract):
                extracted_info = subinfo
                extracted_info['_index'] = i
            inst.rmaps.append(mapping_from_dict(subinfo))

        inst.n_params = len(inst.pmaps)
        inst.n_results = len(inst.rmaps)

        if result_to_extract is not None:
            return inst, extracted_info
        return inst
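The serialized config is TOML whose params and results entries are arrays of tables, each table handed to mapping_from_dict; only the name key of a results table is used directly above, so beyond that the sketch below is an assumption:

[[params]]
name = "n_e"

[[results]]
name = "flux"

A caller extracting one result would then do something like inst, extracted = SomeModel.from_serialized('config.toml', result_to_extract='flux'), where SomeModel stands in for the class that defines this method.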
Example 6
def farm_out_to_ljob(argv):
    "ljob is a shell script"
    import os
    from pwkit.cli import die
    from pwkit.io import Path

    if 'TOP' not in os.environ:
        die('ljob command must be run with the environment variable $TOP set to a directory')

    ljob_support_dir = (Path(__file__).parent.parent / 'ljob_support').resolve()
    os.environ['LJOB_SUPPORT'] = str(ljob_support_dir)
    ljob_script = ljob_support_dir / 'ljob.sh'
    # execv never returns: the current Python process is replaced by ljob.sh.
    os.execv(str(ljob_script), argv)
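A minimal usage sketch, with a hypothetical $TOP directory; on success the call does not return, since the process becomes ljob.sh:

import os, sys
os.environ['TOP'] = '/data/jobs'  # hypothetical job-tree root
farm_out_to_ljob(sys.argv)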
Example 7
import pandas as pd
from pwkit.io import Path  # as used elsewhere in this project

def basic_load(datadir, drop_metadata=True):
    """Load a directory of textual tables (such as training set data).

    **Call signature**

    *datadir*
      A path to a directory of textual tables; format described below.
    *drop_metadata* (default ``True``)
      If true, columns marked as metadata will be dropped from the returned
      table.
    Return value
      A :class:`pandas.DataFrame` of data, concatenating all of the input tables.

    The data format is documented in :ref:`make-training-set`. Briefly, each
    file in *datadir* whose name ends with ``.txt`` will be loaded as a table
    using :func:`pandas.read_table`. The recommended format is tab-separated
    values with a single header row. Column names should end in type
    identifiers such as ``(lin)`` to identify their roles, although this
    function ignores this information except to drop columns whose names end
    in ``(meta)`` if so directed.

    """
    datadir = Path(datadir)
    chunks = []

    for item in datadir.glob('*.txt'):
        chunks.append(pd.read_table(str(item)))

    data = pd.concat(chunks, ignore_index=True)

    if drop_metadata:
        # Drop `foo(meta)` columns
        for col in data.columns:
            if col.endswith('(meta)'):
                del data[col]

    return data
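Per the docstring, each .txt file is tab-separated with one header row whose column names end in role tags; a minimal sketch of a file and a load call (the names and values are hypothetical):

# training_data/set1.txt, tab-separated:
#   n_e(lin)    B(lin)    source(meta)
#   1.0e4       0.5       sim-004

data = basic_load('training_data')
print(list(data.columns))  # the '(meta)' column has been dropped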
Example 8
# Copyright 2018 Peter Williams <*****@*****.**>
# Licensed under the MIT License.
"""Some Python utilities for programmatically manipulating the website
content.

"""
import sys
from pwkit.io import Path

root = Path(__file__).parent
content = root / 'content'


def all_blog_posts():
    "Generate the paths to the Markdown files for all blog post."

    for year in content.glob('2???'):
        for post in year.glob('*.md'):
            if post.name.startswith('_'):
                continue

            yield post
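Combined with read_markdown below, iterating over every post's front matter and body is direct; a minimal sketch:

for post in all_blog_posts():
    front_matter, content = read_markdown(post)
    print(post.name, len(content))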


def read_markdown(path):
    """Read a Markdown file.

    Returns `(front_matter, content)`, where `front_matter` is the unparsed TOML
    text at the beginning of the file, and `content` is the main Markdown
    content. The "+++" delimiters do not appear in either.