Example #1
def init(ctx, dev, directory):
    """ Initialize new configuration directory.
    """
    from seed.runner.setting import discover_configs, generate_settings

    if directory:
        os.environ['SEED_CONF'] = directory

    directory, py, yaml = discover_configs()

    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    py_contents, yaml_contents = generate_settings(dev)

    if os.path.isfile(yaml):
        click.confirm("File already exists at '%s', overwrite?" %
                      click.format_filename(yaml),
                      abort=True)

    with click.open_file(yaml, 'w') as fp:
        fp.write(yaml_contents)

    if os.path.isfile(py):
        click.confirm("File already exists at '%s', overwrite?" %
                      click.format_filename(py),
                      abort=True)

    with click.open_file(py, 'w') as fp:
        fp.write(py_contents)
Example #2
def workflow_validate(ctx, file):
    """Validate given REANA specification file."""
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    try:
        load_reana_spec(click.format_filename(file))
        click.echo(
            click.style(
                'File {filename} is a valid REANA specification file.'.format(
                    filename=click.format_filename(file)),
                fg='green'))

    except ValidationError as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            '{0} is not a valid REANA specification:\n{1}'.format(
                click.format_filename(file), e.message),
            fg='red'),
                   err=True)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Something went wrong when trying to validate {}'.format(file),
            fg='red'),
                   err=True)
Example #3
def __scan(path, special, verbose, silent):
    scanner = Scanus()
    scanner.search_sp = special
    scanner.scan_directory(path)
    if not silent:
        click.echo("Scan directory: {}".format(path))
        if len(scanner.container_list):
            result_str = "Found {} containers.".format(
                len(scanner.container_list))
            if len(scanner.attach_list):
                result_str = result_str[:-1] + ", with {} attachments".format(
                    len(scanner.attach_list))
            click.echo(result_str)
        else:
            click.echo("Containers not found.", err=True)
        if verbose:
            for i, container in enumerate(scanner.container_list, start=1):
                click.echo("Container №{0}:".format(i))
                for j, file in enumerate(container, start=1):
                    click.echo("{:3d}. {}".format(
                        j, click.format_filename(file)))
            click.echo("\nAttachments:")
            for i, attach in enumerate(scanner.attach_list, start=1):
                click.echo("{:3d}. {}".format(
                    i, click.format_filename(attach)))
    return (scanner.container_list, scanner.attach_list)
Example #4
File: test_utils.py Project: gambogi/click
def test_filename_formatting():
    assert click.format_filename(b'foo.txt') == 'foo.txt'
    assert click.format_filename(b'/x/foo.txt') == '/x/foo.txt'
    assert click.format_filename(u'/x/foo.txt') == '/x/foo.txt'
    assert click.format_filename(u'/x/foo.txt', shorten=True) == 'foo.txt'
    assert click.format_filename(b'/x/foo\xff.txt', shorten=True) \
        == u'foo\ufffd.txt'
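The test above pins down click.format_filename's contract: byte and text paths both come back as text, shorten=True keeps only the basename, and undecodable bytes are replaced rather than raising an error. For reference, here is a minimal, self-contained sketch of the pattern most examples on this page follow; it is not taken from any of the listed projects, and the show command and its PATH argument are purely illustrative.

import click

@click.command()
@click.argument('path', type=click.Path())
def show(path):
    """Echo the given PATH in a terminal-safe form."""
    # format_filename() makes the path safe to print even if it contains
    # bytes that cannot be decoded with the current filesystem encoding.
    click.echo('Selected file: %s' % click.format_filename(path))

if __name__ == '__main__':
    show()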
Example #6
def cli(target, ruleset, svg):
    '''Sligillum transforms writings into visages

    Target file is a common txt file containing text to be transformed.
    Ruleset file contains mapping of letters to numbers which represent
    side numbers of drawn polygons.
    Output consists of an svg file.
    '''
    target_stream = click.open_file(click.format_filename(target), 'r')
    ruleset_stream = click.open_file(click.format_filename(ruleset), 'r')
    pb = PrimaryBuilder()
    with target_stream as f:
        while True:
            chunk = f.readline()
            pb.add_to_letter_list(letter_reader(chunk))
            if not chunk:
                target_stream.close()
                pb.rarefy_letters(200)
                break
    with ruleset_stream as f:
        while True:
            chunk = f.read(1024)
            pb.add_to_rule_dict(ruleset_reader(chunk))
            if not chunk:
                ruleset_stream.close()
                pb.generate_substitute_list()
                pb.generate_substitute_zip()
                render_svg(click.format_filename(svg), pb)
                break
Example #7
def cli(kernel, rootfs, driver, appfs, outfile):
    dic = [
        ("kernel", 0x200000, click.format_filename(kernel)),
        ("rootfs", 0x350000, click.format_filename(rootfs)),
        ("driver", 0xa0000, click.format_filename(driver)),
        ("appfs", 0x4a0000, click.format_filename(appfs)),
    ]
    outfile = click.format_filename(outfile)
    tmpfile = "tmp.bin"
    fullflash = open(tmpfile, 'wb')
    for name, size, filename in dic:
        buffersize = os.path.getsize(filename)
        if size != buffersize:
            click.echo('Size mismatch. The provided %s has a size of %s, '
                       'but it needs to have the size %s.' % (name, buffersize, size))
            return

        with open(filename, "rb") as part:
            buffer = part.read(size)
        fullflash.write(buffer)
    # Close the assembled image so the data is flushed before mkimage reads it.
    fullflash.close()
    cmd = "mkimage -A MIPS -O linux -T firmware -C none -a 0 -e 0 -n jz_fw -d " + tmpfile + " " + outfile
    os.system(cmd)
    os.remove(tmpfile)
Example #8
    def update(self, new_data):
        """Append *new_data* to end of *datafile* (HDF5)."""
        if not isfile(self.path):
            click.echo(click.style(" * Creating data file: {}"
                                   .format(click.format_filename(self.path)),
                                   fg="green"))
            logging.info("New data file created at {}"
                         .format(click.format_filename(self.path)))

        store = pd.HDFStore(self.path, complevel=9, complib="zlib")

        if "/data" not in store.keys():
            data = pd.DataFrame()
        else:
            data = store['data']
            data.index.levels[0].tz = None

        new_data.index.levels[0].tz = None
        store['data'] = data.append(new_data)
        store.data.reset_index(inplace=True)
        store.close()

        # Log the changes that we just wrote
        rows = len(new_data)
        dates = len(new_data.index.levels[0])
        logging.info("Wrote {:,} rows ({:,} unique timestamps) to {}"
                     .format(rows, dates, click.format_filename(self.path)))
Example #9
def init(ctx, directory):
    "Initialize new configuration directory."
    from sentry.runner.settings import discover_configs, generate_settings
    if directory is not None:
        os.environ['SENTRY_CONF'] = directory

    directory, py, yaml = discover_configs()

    # In this case, the config is pointing directly to a file, so we
    # must maintain old behavior, and just abort
    if yaml is None and os.path.isfile(py):
        # TODO: Link to docs explaining about new behavior of SENTRY_CONF?
        raise click.ClickException("Found legacy '%s' file, so aborting." % click.format_filename(py))

    if yaml is None:
        raise click.ClickException("DIRECTORY must not be a file.")

    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    py_contents, yaml_contents = generate_settings()

    if os.path.isfile(yaml):
        click.confirm("File already exists at '%s', overwrite?" % click.format_filename(yaml), abort=True)

    with click.open_file(yaml, 'w') as fp:
        fp.write(yaml_contents)

    if os.path.isfile(py):
        click.confirm("File already exists at '%s', overwrite?" % click.format_filename(py), abort=True)

    with click.open_file(py, 'w') as fp:
        fp.write(py_contents)
Example #10
def workflow_validate(ctx, file, environments, pull):  # noqa: D301
    """Validate workflow specification file.

    The `validate` command allows you to check syntax and validate the reana.yaml
    workflow specification file.

    Examples: \n
    \t $ reana-client validate -f reana.yaml
    """
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    try:
        load_reana_spec(
            click.format_filename(file),
            skip_validate_environments=not environments,
            pull_environment_image=pull,
        )

    except ValidationError as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "{0} is not a valid REANA specification:\n{1}".format(
                click.format_filename(file), e.message),
            msg_type="error",
        )
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Something went wrong when trying to validate {}".format(file),
            msg_type="error",
        )
Example #11
def process(inputpath, outputpath, grayscale, width, height, classifier):
    # Loads image
    img = cv2.imread(inputpath)

    if img is None: return

    # fetch the roi's
    objects = detect(img, grayscale, width, height, classifier)

    # Save the new image
    filename_index = 0
    for object in objects:
        write_to_path = ''
        if path.isfile(outputpath):
            write_to_path = outputpath
        else:
            path_parts = inputpath.split("/")
            generated_filename = path_parts[len(path_parts) - 1]
            write_to_path = click.format_filename(
                outputpath) + generated_filename
            if len(objects) > 1:
                write_to_path = click.format_filename(outputpath) + str(
                    filename_index) + generated_filename
                filename_index += 1
        cv2.imwrite(write_to_path, object)
Example #12
def configure(ctx, py, yaml, skip_backend_validation=False):
    """
    Given the two different config files, set up the environment.

    NOTE: Will only execute once, so it's safe to call multiple times.
    """
    global __installed
    if __installed:
        return

    # Make sure that our warnings are always displayed
    import warnings
    warnings.filterwarnings('default', '', Warning, r'^sentry')

    # Add in additional mimetypes that are useful for our static files
    # which aren't common in default system registries
    import mimetypes
    for type, ext in (
        ('application/json', 'map'),
        ('application/font-woff', 'woff'),
        ('application/font-woff2', 'woff2'),
        ('application/vnd.ms-fontobject', 'eot'),
        ('application/x-font-ttf', 'ttf'),
        ('application/x-font-ttf', 'ttc'),
        ('font/opentype', 'otf'),
    ):
        mimetypes.add_type(type, '.' + ext)

    from .importer import install

    if yaml is None:
        # `yaml` will be None when SENTRY_CONF is pointed
        # directly to a file, in which case, this file must exist
        if not os.path.exists(py):
            if ctx:
                raise click.ClickException("Configuration file does not exist. Use 'sentry init' to initialize the file.")
            raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(py))
    elif not os.path.exists(yaml) and not os.path.exists(py):
        if ctx:
            raise click.ClickException("Configuration file does not exist. Use 'sentry init' to initialize the file.")
        raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))

    os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'

    install('sentry_config', py, DEFAULT_SETTINGS_MODULE)

    # HACK: we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')

    from .initializer import initialize_app, on_configure
    initialize_app({
        'config_path': py,
        'settings': settings,
        'options': yaml,
    }, skip_backend_validation=skip_backend_validation)
    on_configure({'settings': settings})

    __installed = True
Example #13
def organize_images(dry_run, src_dir, dest_dir_base):
    src_dir = click.format_filename(src_dir)
    dest_dir_base = click.format_filename(dest_dir_base)
    click.echo("importing from source dir: %s" % src_dir)
    click.echo("organizing into dir: %s" % dest_dir_base)

    #moved = 0
    #skipped = 0


    def get_exif(fn):
        ret = {}
        try:
            i = Image.open(fn)
        except IOError as ioe:
            click.echo("could not open image file.")
            return {}, None

        #click.echo(i)
        if not hasattr(i, '_getexif'):
            click.echo("no _getexif ")
            return {}, None

        info = i._getexif()
        if not info:
            click.echo("_getexif returned nothing")
            return {}, None

        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            ret[decoded] = value
        _, ext = os.path.splitext(fn)
        new_fn = hashlib.md5(i.tostring()).hexdigest() + ext.lower()
        return ret, new_fn
Example #14
def datapull(tagfile, datafile, start, stepsize, steps, chunksize):
    """Extract PI data from TAGFILE into DATAFILE using Excel, PIDatalink, and python"""
    infile = click.format_filename(tagfile)
    outfile = click.format_filename(datafile)

    df_tags = pd.read_excel(infile)
    print("Generating timestamps...")
    startstamp = datetime.datetime.strptime(start, date_format_string)
    delta = datetime.timedelta(minutes=int(stepsize))
    timestamps = [startstamp + delta * i for i in range(0, int(steps) + 1)]
    tags = list(df_tags.loc[:, 'tag'])
    print("Generating PI Datalink formulas...")
    table = [[twa_formula_string(tag, ts, ts + delta) for tag in tags]
             for ts in timestamps]
    print("Generating DataFrame")
    df_data = pd.DataFrame(table, columns=tags)
    df_data['timestamps'] = timestamps
    df_data = df_data.set_index('timestamps')
    print("Writing output file...")
    writer = pd.ExcelWriter(outfile)
    df_data.to_excel(writer, sheet_name='Data')
    writer.save()
    writer.close()
    print("Evaluating formulas. This may take several minutes.")
    xlUpdate.xlUpdate(os.path.join(os.getcwd(), outfile))
    input("Press the enter key to exit.")
Example #15
def accordRareVariantAnalysis(rootdir,
                              phenotype,
                              modelfile,
                              inputdir,
                              selectedsnp=None):
    """

    Run accordJP pipeline, "rare variant analysis"

    As of this moment, JYL -- FIXME

    Print INPUTDIR if the directory exists.

    """

    click.echo(click.format_filename(rootdir))
    click.echo(click.format_filename(inputdir))
    click.echo(click.format_filename(phenotype))
    click.echo(click.format_filename(modelfile))

    inputdir = rootdir + "/" + inputdir
    fullPath = os.path.abspath(inputdir)
    print("This is the full path:  " + fullPath)
    click.echo(click.format_filename(inputdir))

    accord.rareVariantAnalysis(fullPath, phenotype, modelfile, selectedsnp)
Example #16
File: init.py Project: Akashguharoy/sentry
def init(ctx, dev, directory):
    "Initialize new configuration directory."
    from sentry.runner.settings import discover_configs, generate_settings
    if directory is not None:
        os.environ['SENTRY_CONF'] = directory

    directory, py, yaml = discover_configs()

    # In this case, the config is pointing directly to a file, so we
    # must maintain old behavior, and just abort
    if yaml is None and os.path.isfile(py):
        # TODO: Link to docs explaining about new behavior of SENTRY_CONF?
        raise click.ClickException("Found legacy '%s' file, so aborting." % click.format_filename(py))

    if yaml is None:
        raise click.ClickException("DIRECTORY must not be a file.")

    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    py_contents, yaml_contents = generate_settings(dev)

    if os.path.isfile(yaml):
        click.confirm("File already exists at '%s', overwrite?" % click.format_filename(yaml), abort=True)

    with click.open_file(yaml, 'w') as fp:
        fp.write(yaml_contents)

    if os.path.isfile(py):
        click.confirm("File already exists at '%s', overwrite?" % click.format_filename(py), abort=True)

    with click.open_file(py, 'w') as fp:
        fp.write(py_contents)
Example #17
def extract(keys_input, bibtex_input, bibtex_output, verbose):
    lines = keys_input.readlines()
    citation_keys = (line.strip() for line in lines)
    if verbose:
        print("Read {} keys from {}".format(
            len(lines), click.format_filename(keys_input.name)))
    main_bib = load_bib(bibtex_input)
    if verbose:
        print("Read {} entries from {}".format(
            len(main_bib.entries), click.format_filename(bibtex_input.name)))

    out_bib = BibDatabase()
    species_pattern = re.compile(
        r'({\\textless}i{\\textgreater}\w.*?{\\textless}/i{\\textgreater})')
    for key in citation_keys:
        e = main_bib.entries_dict[key]
        title = e['title']
        groups = species_pattern.findall(title)
        for grp in groups:
            s = grp.replace('{\\textless}i{\\textgreater}',
                            '').replace('{\\textless}/i{\\textgreater}', '')
            s = '\\textit{\\uppercase{' + s[0] + '}' + s[1:] + '}'
            title = title.replace(grp, s)
        e['title'] = title
        out_bib.entries.append(e)
    if verbose:
        print("Writing {} entries to {}".format(
            len(out_bib.entries), click.format_filename(bibtex_output.name)))
    writer = BibTexWriter()
    bibtex_output.write(writer.write(out_bib))
Example #18
def accordDoThePlottings(rootdir,
                         phenotype,
                         modelfile,
                         inputdir,
                         selectedsnp=None):
    """

    Run accordJP pipeline, "doing plot"

    As of this moment, JYL -- FIXME

    Print INPUTDIR if the directory exists.

    """

    click.echo(click.format_filename(rootdir))
    click.echo(click.format_filename(inputdir))
    click.echo(click.format_filename(phenotype))
    click.echo(click.format_filename(modelfile))

    inputdir = rootdir + "/" + inputdir
    fullPath = os.path.abspath(inputdir)
    print("This is the full path:  " + fullPath)
    click.echo(click.format_filename(inputdir))

    accord.getPlotting(fullPath, phenotype, modelfile, selectedsnp)
Example #19
def main(source, force, name, quiet, verbose, destination, add_to_dash,
         add_to_global, icon, index_page):
    """
    Convert docs from SOURCE to Dash.app's docset format.
    """
    try:
        logging.config.dictConfig(
            create_log_config(verbose=verbose, quiet=quiet))
    except ValueError as e:
        click.secho(e.args[0], fg="red")
        raise SystemExit(1)

    if icon:
        icon_data = icon.read()
        if not icon_data.startswith(PNG_HEADER):
            log.error('"{}" is not a valid PNG image.'.format(
                click.format_filename(icon.name)))
            raise SystemExit(1)
    else:
        icon_data = None

    source, dest, name = setup_paths(source,
                                     destination,
                                     name=name,
                                     add_to_global=add_to_global,
                                     force=force)
    dt = parsers.get_doctype(source)
    if dt is None:
        log.error('"{}" does not contain a known documentation format.'.format(
            click.format_filename(source)))
        raise SystemExit(errno.EINVAL)
    docs, db_conn = prepare_docset(source, dest, name, index_page)
    doc_parser = dt(docs)
    log.info(('Converting ' + click.style('{parser_name}', bold=True) +
              ' docs from "{src}" to "{dst}".').format(
                  parser_name=dt.name,
                  src=click.format_filename(source),
                  dst=click.format_filename(dest)))

    with db_conn:
        log.info('Parsing documentation...')
        toc = doc_parser.add_toc(show_progressbar=not quiet)
        for entry in doc_parser.parse():
            db_conn.execute('INSERT INTO searchIndex VALUES (NULL, ?, ?, ?)',
                            entry.as_tuple())
            toc.send(entry)
        count = (
            db_conn.execute('SELECT COUNT(1) FROM searchIndex').fetchone()[0])
        log.info(
            ('Added ' +
             click.style('{count:,}', fg="green" if count > 0 else "red") +
             ' index entries.').format(count=count))
        toc.close()

    if icon_data:
        add_icon(icon_data, dest)

    if add_to_dash or add_to_global:
        log.info('Adding to dash...')
        os.system('open -a dash "{}"'.format(dest))
Example #20
def tck_filter(ctx, workflow, tck, odf, opt):
    """Filters the tracking result.

    Available workflows: mrtrix_tcksift"""

    try:
        wf_mod = import_module('.workflows.' + workflow, package='trampolino')
    except SystemError:
        wf_mod = import_module('workflows.' + workflow)
    except ImportError as err:
        click.echo(workflow + ' is not a valid workflow.')
        sys.exit(1)
    wf_sub = wf_mod.create_pipeline(name='tck_post', opt=opt)
    wf = ctx.obj['workflow']
    if 'track' not in ctx.obj:
        wf_sub.inputs.inputnode.tck = click.format_filename(tck)
        wf_sub.inputs.inputnode.odf = click.format_filename(odf)
        wf.add_nodes([wf_sub])
    else:
        wf.add_nodes([wf_sub])
        wf.connect([
            (ctx.obj['track'], wf_sub, [("outputnode.tck", "inputnode.tck")]),
            (ctx.obj['track'], wf_sub, [("inputnode.odf", "inputnode.odf")])
        ])
    wf.connect([(wf_sub, ctx.obj['results'], [("outputnode.tck_post",
                                               "@tck_post")])])
    return workflow
Example #21
File: base.py Project: carlbordum/os2mo
def docs(open_browser, destdir):
    '''Build the documentation'''
    import webbrowser

    import sphinx.cmdline

    vuedoc_cmd = [
        os.path.join(frontenddir, 'node_modules', '.bin', 'vuedoc.md'),
        '--output',
        os.path.join(docsdir, 'vuedoc'),
    ] + [
        os.path.join(dirpath, file_name)
        for dirpath, dirs, file_names in os.walk(
            os.path.join(frontenddir, 'src'))
        for file_name in file_names if file_name.endswith('.vue')
    ]

    subprocess.check_call(vuedoc_cmd)

    if destdir:
        destdir = click.format_filename(destdir)
    else:
        destdir = os.path.join(topdir, 'docs', 'out', 'html')

    sphinx.cmdline.main(['-b', 'html', docsdir, destdir])

    if open_browser:
        webbrowser.get('default').open(click.format_filename(destdir))
Example #22
def workflow_validate(ctx, file):  # noqa: D301
    """Validate workflow specification file.

    The `validate` command allows you to check syntax and validate the reana.yaml
    workflow specification file.

    Examples: \n
    \t $ reana-client validate -f reana.yaml
    """
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    try:
        load_reana_spec(click.format_filename(file))
        click.echo(
            click.style(
                'File {filename} is a valid REANA specification file.'.format(
                    filename=click.format_filename(file)),
                fg='green'))

    except ValidationError as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            '{0} is not a valid REANA specification:\n{1}'.format(
                click.format_filename(file), e.message),
            fg='red'),
                   err=True)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Something went wrong when trying to validate {}'.format(file),
            fg='red'),
                   err=True)
Example #23
def cli(kernel, rootfs, driver, appfs, outfile):
    dic = [
        ("kernel", 0x200000, click.format_filename(kernel)),
        ("rootfs", 0x350000, click.format_filename(rootfs)),
        ("driver", 0xa0000, click.format_filename(driver)),
        ("appfs", 0x4a0000, click.format_filename(appfs)),
    ]
    outfile = click.format_filename(outfile)
    tmpfile = "tmp.bin"
    fullflash = open(tmpfile, 'wb')
    for name, size, filename in dic:
        buffersize = os.path.getsize(filename)
        if size < buffersize:
            click.echo('Size mismatch. The provided {} has a '
                       'size of {}, but it needs to have the '
                       'size {}. Please try to free some '
                       'space!'.format(name, buffersize, size))
            return

        part = open(filename, "rb")
        buffer = part.read(size)
        fullflash.write(buffer)
        # Padding with zeros:
        if buffersize < size:
            padsize = size - buffersize
            for x in range(0, padsize):
                fullflash.write(bytearray.fromhex('00'))

    cmd = "mkimage -A MIPS -O linux -T firmware -C none -a 0 -e 0 -n jz_fw -d " + tmpfile + " " + outfile

    subprocess.check_output(cmd, shell=True)

    os.remove(tmpfile)

    click.echo('Firmware {} was successfully created!'.format(outfile))
Example #24
def main_init(ctx,              # type: click.Context
              options,          # type: Dict[str, Any]
              force,            # type: bool
              index_url,        # type: str
              tag,              # type: Iterable[str]
              extra_index_url,  # type: Tuple[str]
              ):
    # type: (...) -> None
    """Initialize reqwire in the current directory."""
    if not force and options['directory'].exists():
        console.error('requirements directory already exists')
        ctx.abort()
    src_dir = reqwire.scaffold.init_source_dir(
        options['directory'], exist_ok=force, name=options['source_dir'])
    console.info('created {}', click.format_filename(str(src_dir)))

    build_dir = reqwire.scaffold.init_source_dir(
        options['directory'], exist_ok=force, name=options['build_dir'])
    console.info('created {}', click.format_filename(str(build_dir)))

    if not tag:
        tag = ('docs', 'main', 'qa', 'test')
    for tag_name in tag:
        filename = reqwire.scaffold.init_source_file(
            working_directory=options['directory'],
            tag_name=tag_name,
            extension=options['extension'],
            index_url=index_url,
            extra_index_urls=extra_index_url)
        console.info('created {}', click.format_filename(str(filename)))
Example #25
def cli_fig_hyp(hypno, grid, color, outfile, dpi):
    """Create hypnogram figure from hypnogram file."""
    # File conversion :
    if hypno is not None:
        hypno = click.format_filename(hypno)
    if outfile is not None:
        outfile = click.format_filename(outfile)
        ext = os.path.splitext(outfile)[1][1:].strip().lower()
        if ext == '':
            outfile = outfile + '.png'
    else:
        outfile = hypno + '.png'
    # Load hypnogram
    hypno, sf_hyp = read_hypno(hypno)
    # Bad cases (e.g. EDF files from DreamBank.net)
    if sf_hyp < 1:
        mult = int(np.round(len(hypno) / sf_hyp))
        hypno = oversample_hypno(hypno, mult)
        sf_hyp = 1
    # Create figure
    write_fig_hyp(outfile,
                  hypno,
                  sf=sf_hyp,
                  tstartsec=0,
                  grid=grid,
                  ascolor=color,
                  dpi=dpi)
    print('Hypnogram figure saved to:', outfile)
Example #26
def init(ctx, dev, directory):
    "Initialize new configuration directory."
    from sentry.runner.settings import discover_configs, generate_settings
    if directory is not None:
        os.environ['CLIMS_CONF'] = directory

    directory, py, yaml = discover_configs()

    if yaml is None:
        raise click.ClickException("DIRECTORY must not be a file.")

    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    py_contents, yaml_contents = generate_settings(dev)

    if os.path.isfile(yaml):
        click.confirm(
            "File already exists at '%s', overwrite?" % click.format_filename(yaml), abort=True
        )

    with click.open_file(yaml, 'w') as fp:
        fp.write(yaml_contents)

    if os.path.isfile(py):
        click.confirm(
            "File already exists at '%s', overwrite?" % click.format_filename(py), abort=True
        )

    with click.open_file(py, 'w') as fp:
        fp.write(py_contents)
Example #27
def grades(username, password, organization, assignment_prefix, student_file,
           grade_file):
    """Download all grades for an assignment."""
    click.echo("getting grades")
    gh = login(username=username, password=password)
    student_file_name = click.format_filename(student_file)
    grade_file_name = click.format_filename(grade_file)
    grade_counts = Counter()
    with open(grade_file_name, 'w', newline='') as csvfile:
        csvw = csv.writer(csvfile)
        csvw.writerow(['github_id', 'points', 'possible_points', 'run_url'])
        for student in open(student_file_name):
            student = student.strip()
            repo = gh.repository(organization,
                                 '%s-%s' % (assignment_prefix, student))
            run = next(next(repo.commits()).check_runs())
            grade = run.output.text.split()[1].split('/')
            numerator = int(grade[0])
            denominator = int(grade[1])
            csvw.writerow(((student, numerator, denominator, run.html_url)))
            print('\t'.join(
                (student, str(numerator), str(denominator), run.html_url)))
            grade_counts.update([numerator])
            time.sleep(.05)

    for grade, count in sorted(grade_counts.items()):
        print('%10s %s' % (grade, ''.join(['*'] * count)))
    return 0
Example #28
def main(input):
    participantid = re.search(r'(Participant\d+)',
                              click.format_filename(input)).group(1)
    cleanedlogfile = grep_conflicts(click.format_filename(input))
    truetimes = true_timing(cleanedlogfile)
    print(returntimes_ms(returntimes_s, truetimes))
    print(cleanedlogfile)
Example #29
def tck_convert(ctx, workflow, tck, ref, opt):
    """Convert tractograms.

    Available workflows: tck2trk, trk2tck"""

    try:
        wf_mod = import_module('.workflows.' + workflow, package='trampolino')
    except SystemError:
        wf_mod = import_module('workflows.' + workflow)
    except ImportError as err:
        click.echo(workflow + ' is not a valid workflow.')
        sys.exit(1)
    wf_sub = wf_mod.create_pipeline(name='tck_convert', opt=opt)
    wf = ctx.obj['workflow']
    if ref:
        wf_sub.inputs.inputnode.ref = click.format_filename(ref)
    if 'track' not in ctx.obj:
        wf_sub.inputs.inputnode.tck = click.format_filename(tck)
        wf.add_nodes([wf_sub])
    else:
        wf.add_nodes([wf_sub])
        wf.connect([(ctx.obj['track'], wf_sub, [("outputnode.tck",
                                                 "inputnode.tck")])])
    wf.connect([(wf_sub, ctx.obj['results'], [("outputnode.trk", "@trk")])])
    return workflow
Example #30
def main(table, template, filename_field, extension, with_unidecode):
    '''
    Generate a number of text files using a database table with fields
    and values, and a template file.  Arguments:

        TABLE     the spreadsheet table with data to fill in the template
        TEMPLATE  a file containing the template, in jinja2 format
    '''
    table_filename = click.format_filename(table)
    template_filename = click.format_filename(template)
    if filename_field.isdigit():
        filename_field = int(filename_field)
    if extension is None:
        extension = template_filename.split('.')[-1]

    env = jinja2.Environment(loader=jinja2.FileSystemLoader('.'))
    template = env.get_template(template_filename)

    # if table_filename.lower().endswith('.org'):
    instances = load_org_table(table_filename)

    if isinstance(filename_field, int):
        filename_field_fn = lambda x: list(x.values())[filename_field]
    else:
        filename_field_fn = lambda x: x[filename_field]

    for instance in instances:
        output_basename = filename_field_fn(instance)
        # output_basename = filter_de2ascii(output_basename)
        if with_unidecode:
            output_basename = unidecode(output_basename)
        output_filename = '{0}.{1}'.format(output_basename, extension)
        with open(output_filename, 'w') as output_file:
            output_file.write(template.render(instance))
Example #31
File: utils.py Project: All-less/exp-kit
def parse_json_file(path):
    try:
        with open(path, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        error('{} does not exist.'.format(click.format_filename(path)))
    except json.decoder.JSONDecodeError as e:
        error('Parsing json file {} failed. {}'.format(click.format_filename(path), e))
Example #32
def db_ls_dirs_command():
    """List dirs in the database."""
    db, ds = AppDB.get_db()

    for n in ds['dirs'].all():
        click.echo('%s' % click.format_filename(n['abs_path']))
        click.echo('\t%s\n' % click.format_filename(json.dumps(n)))
    return
Example #33
def setup_files(agent_helper, best=False):
    """Setup result files and paths"""
    if agent_helper.gen_scenario_test:
        agent_helper.gen_scenario_result_base_path = agent_helper.result_base_path
        agent_helper.sim_config_path = click.format_filename(agent_helper.gen_scenario)
    else:
        agent_helper.sim_config_path = click.format_filename(agent_helper.sim_config_path)

    agent_helper.agent_config_path = click.format_filename(agent_helper.agent_config_path)
    agent_helper.network_path = click.format_filename(agent_helper.network_path)
    agent_helper.service_path = click.format_filename(agent_helper.service_path)

    # set result and graph base path based on network, service, config name
    base_path = get_base_path(agent_helper.agent_config_path, agent_helper.network_path,
                              agent_helper.service_path, agent_helper.sim_config_path)
    agent_helper.result_base_path = f"./results/{base_path}"
    agent_helper.graph_base_path = f"./graph/{base_path}"

    # Set config and log file paths
    agent_helper.config_dir = f"{agent_helper.result_base_path}/{agent_helper.experiment_id}/"
    agent_helper.logfile = f"{agent_helper.config_dir}training.log"
    agent_helper.graph_path = f"{agent_helper.graph_base_path}/{agent_helper.experiment_id}"
    if agent_helper.test and not agent_helper.append_test:
        # Set paths used for test
        if best:
            agent_helper.config_dir = f"{agent_helper.result_base_path}/best/{agent_helper.test}" \
                                      f"/test-{DATETIME}_seed{agent_helper.seed}/"
            agent_helper.graph_path = f"{agent_helper.graph_base_path}/{agent_helper.test}" \
                                      f"/test-{DATETIME}_seed{agent_helper.seed}_best/"
        else:
            agent_helper.config_dir = f"{agent_helper.result_base_path}/{agent_helper.test}" \
                                      f"/test-{DATETIME}_seed{agent_helper.seed}/"
            agent_helper.graph_path = f"{agent_helper.graph_base_path}/{agent_helper.test}" \
                                      f"/test-{DATETIME}_seed{agent_helper.seed}/"
        agent_helper.logfile = f"{agent_helper.config_dir}test.log"
        agent_helper.weights_path = f"{agent_helper.result_base_path}/{agent_helper.test}/weights"
    if agent_helper.append_test:
        # reset append test flag so that next time setup_files is called result files are set properly for tests
        agent_helper.append_test = False
    agent_helper.result.log_file = agent_helper.logfile
    agent_helper.result_file = f"{agent_helper.config_dir}result.yaml"
    # FIXME: Logging setup has to be done here for now. Move to a proper location
    global logger
    logger = setup_logging(agent_helper.verbose, agent_helper.logfile)

    # Copy files to result dir
    agent_helper.agent_config_path, agent_helper.network_path, agent_helper.service_path, \
        agent_helper.sim_config_path = copy_input_files(
            agent_helper.config_dir,
            agent_helper.agent_config_path,
            agent_helper.network_path,
            agent_helper.service_path,
            agent_helper.sim_config_path)

    if agent_helper.gen_scenario_test:
        weights = f"{agent_helper.gen_scenario_result_base_path}/{agent_helper.test}/weights*"
        for file in glob.glob(r'{}'.format(weights)):
            copy(file, f"{agent_helper.result_base_path}/{agent_helper.test}/")
Example #34
def odf_track(ctx, workflow, odf, seed, algorithm, angle, angle_range,
              min_length, ensemble, opt):
    """Reconstructs the streamlines.

    Available workflows: mrtrix_tckgen"""

    try:
        wf_mod = import_module('.workflows.' + workflow, package='trampolino')
    except SystemError:
        wf_mod = import_module('workflows.' + workflow)
    except ImportError as err:
        click.echo(workflow + ' is not a valid workflow.')
        sys.exit(1)
    param = pe.Node(interface=util.IdentityInterface(
        fields=["angle", "algorithm", "min_length"]),
                    name="param_node")
    param_dict = {}
    if angle or algorithm or min_length:
        param.iterables = []
    if angle:
        angles = angle.split(',')
        param_dict['angle'] = [int(a) for a in angles if a.isdigit()]
        if angle_range:
            param_dict['angle'] = range(param_dict['angle'][0],
                                        param_dict['angle'][-1])
        param.iterables.append(('angle', param_dict['angle']))
    if algorithm:
        param_dict['algorithm'] = algorithm.split(',')
        param.iterables.append(('algorithm', param_dict['algorithm']))
    if min_length:
        lengths = min_length.split(',')
        param_dict['min_length'] = [int(l) for l in lengths if l.isdigit()]
        param.iterables.append(('min_length', param_dict['min_length']))
    wf_sub = wf_mod.create_pipeline(name='tck', opt=opt, ensemble=ensemble)
    if ensemble:
        param.iterables.remove((ensemble, param_dict[ensemble]))
        setattr(wf_sub.inputs.inputnode, ensemble, param_dict[ensemble])
    wf = ctx.obj['workflow']
    if seed:
        wf_sub.inputs.inputnode.seed = click.format_filename(seed)
    if 'recon' not in ctx.obj:
        wf_sub.inputs.inputnode.odf = click.format_filename(odf)
        wf.add_nodes([wf_sub])
    else:
        wf.add_nodes([wf_sub])
        wf.connect([(ctx.obj['recon'], wf_sub, [("outputnode.odf",
                                                 "inputnode.odf")])])
        if not seed:
            wf.connect([(ctx.obj['recon'], wf_sub, [("outputnode.seed",
                                                     "inputnode.seed")])])
    if param.iterables:
        for p in param.iterables:
            wf.connect([(param, wf_sub, [(p[0], "inputnode." + p[0])])])
    wf.connect([(wf_sub, ctx.obj['results'], [("outputnode.tck", "@tck")])])
    ctx.obj['track'] = wf_sub
    ctx.obj['param'] = param
    return workflow
Example #35
def main(readfile, writefile, dict):
    global output_string
    global use_hr_dict
    use_hr_dict = dict
    output_string += generate_project_start()
    MP.parse_xml(click.format_filename(readfile), set_staff_start, set_staff_end, set_time_signature, set_pitch, set_rest, set_lyric, set_tie, set_dot, set_tuplet)
    output_string += generate_staff_end()
    output_string += generate_project_end()
    write_to_file(click.format_filename(writefile), output_string)
Example #36
def test_filename_formatting():
    assert click.format_filename(b'foo.txt') == 'foo.txt'
    assert click.format_filename(b'/x/foo.txt') == '/x/foo.txt'
    assert click.format_filename(u'/x/foo.txt') == '/x/foo.txt'
    assert click.format_filename(u'/x/foo.txt', shorten=True) == 'foo.txt'

    # filesystem encoding on windows permits this.
    if not WIN:
        assert click.format_filename(b'/x/foo\xff.txt', shorten=True) \
            == u'foo\ufffd.txt'
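This variant of the test also highlights shorten=True, which reduces a path to its basename before display. A minimal sketch of that pattern is shown below; it is not drawn from any of the listed projects, and the report command is purely illustrative.

import click

@click.command()
@click.argument('paths', nargs=-1, type=click.Path())
def report(paths):
    """Echo only the basename of each PATH."""
    for p in paths:
        # shorten=True strips the directory part before the name is printed.
        click.echo(click.format_filename(p, shorten=True))

if __name__ == '__main__':
    report()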
Example #37
File: bench.py Project: fhahn/casmi
def main(new_casmi, legacy_casmi, casm_compiler):
    vms = []

    if not os.path.exists(new_casmi):
        error_abort('File `%s` for new-casmi does not exist' % click.format_filename(new_casmi))
    else:
        vms.append(("new casmi", os.path.abspath(os.path.expanduser(new_casmi))))

    if legacy_casmi is not None:
        if not os.path.exists(legacy_casmi):
            error_abort('File `%s` for legacy-casmi does not exist' % click.format_filename(legacy_casmi))
        else:
            vms.append(("old casmi", os.path.abspath(os.path.expanduser(legacy_casmi))))

    if casm_compiler is not None:
        if not os.path.exists(casm_compiler):
            error_abort('File `%s` for casm-compiler does not exist' % click.format_filename(casm_compiler))
        vms.append(('compiler', os.path.abspath(os.path.expanduser(casm_compiler))))

    for root, dirs, files in os.walk(bench_path):
        for bench_file in files:
            if not bench_file.endswith(".casm"): continue
            #click.echo("Running benchmark %s" % os.path.join(os.path.split(root)[1], bench_file))

            results = {}
            file_path = os.path.join(root, bench_file)
            sys.stdout.write(bench_file)
            sys.stdout.flush()

            for vm in vms:
 
                if vm[0] == 'compiler':
                    tmp_dir = tempfile.mkdtemp()
                    out_file = os.path.join(tmp_dir, 'out')
                    subprocess.call([casm_compiler, file_path, '-o', out_file, '-c'])
                    time = timeit.timeit('run_compiler("{}")'.format(out_file), setup="from __main__ import run_compiler", number=NUM_RUNS)
                    shutil.rmtree(tmp_dir)
                else:
                    time = timeit.timeit('run_script("{}", "{}")'.format(vm[1], file_path), setup="from __main__ import run_script", number=NUM_RUNS)

                #sys.stdout.write(" took %lf s\n" % (time))
                results[vm[0]] = time
                #dump_run(bench_file, vm, time)

            parts = [str(NUM_RUNS), str(results["new casmi"])]

            if 'old casmi' in results:
                parts.append(str(results["old casmi"]))
            if 'compiler' in results:
                parts.append(str(results["compiler"]))
            print(" ; "+" ; ".join(parts))


            """
Example #38
def quickstart():
    """Quickstart wizard for setting up twtxt."""
    width = click.get_terminal_size()[0]
    width = width if width <= 79 else 79

    click.secho("twtxt - quickstart", fg="cyan")
    click.secho("==================", fg="cyan")
    click.echo()

    help_text = "This wizard will generate a basic configuration file for twtxt with all mandatory options set. " \
                "You can change all of these later with either twtxt itself or by editing the config file manually. " \
                "Have a look at the docs to get information about the other available options and their meaning."
    click.echo(textwrap.fill(help_text, width))

    click.echo()
    nick = click.prompt("➤ Please enter your desired nick", default=os.environ.get("USER", ""))

    def overwrite_check(path):
        if os.path.isfile(path):
            click.confirm("➤ '{0}' already exists. Overwrite?".format(path), abort=True)

    cfgfile = click.prompt("➤ Please enter the desired location for your config file",
                           os.path.join(Config.config_dir, Config.config_name),
                           type=click.Path(readable=True, writable=True, file_okay=True))
    cfgfile = os.path.expanduser(cfgfile)
    overwrite_check(cfgfile)

    twtfile = click.prompt("➤ Please enter the desired location for your twtxt file",
                           os.path.expanduser("~/twtxt.txt"),
                           type=click.Path(readable=True, writable=True, file_okay=True))
    twtfile = os.path.expanduser(twtfile)
    overwrite_check(twtfile)

    twturl = click.prompt("➤ Please enter the URL your twtxt file will be accessible from",
                          default="https://example.org/twtxt.txt")

    disclose_identity = click.confirm("➤ Do you want to disclose your identity? Your nick and URL will be shared when "
                                      "making HTTP requests", default=False)

    click.echo()
    add_news = click.confirm("➤ Do you want to follow the twtxt news feed?", default=True)

    conf = Config.create_config(cfgfile, nick, twtfile, twturl, disclose_identity, add_news)

    twtfile_dir = os.path.dirname(twtfile)
    if not os.path.exists(twtfile_dir):
        os.makedirs(twtfile_dir)
    open(twtfile, "a").close()

    click.echo()
    click.echo("✓ Created config file at '{0}'.".format(click.format_filename(conf.config_file)))
    click.echo("✓ Created twtxt file at '{0}'.".format(click.format_filename(twtfile)))
Example #39
def validate_file(path, language, time_execution, show_errors):
    problem = get_problem_from_path(path)
    if problem is None:
        click.echo('Skipping %s because it does not contain '
                   'a valid problem ID' % click.format_filename(path))
        return

    if language is None:
        language = get_language_from_path(path) or {}

    click.echo('Checking output of %s: ' % click.format_filename(path),
               nl=False)
    result = verify_solution(path, language, time_execution, problem)
    print_result(result, show_errors, time_execution)
Example #40
def process_tool_dependencies_xml(tool_dep, install_handle, env_sh_handle):
    """Writes to handles, returns success as a boolean."""
    if not os.path.isfile(tool_dep):
        error('Missing file %s' % tool_dep)
        return False
    if not os.stat(tool_dep).st_size:
        error('Empty file %s' % tool_dep)
        return False
    try:
        install, env = convert_tool_dep(tool_dep)
    except Exception as err:
        # TODO - pass in ctx for logging?
        error('Error processing %s - %s' %
              (click.format_filename(tool_dep), err))
        if not isinstance(err, (NotImplementedError, RuntimeError)):
            # This is an unexpected error, traceback is useful
            import traceback
            error(traceback.format_exc() + "\n")
        return False
    # Worked...
    for cmd in install:
        install_handle.write(cmd + "\n")
    for cmd in env:
        env_sh_handle.write(cmd + "\n")
    return True
Example #41
File: lsbranch.py Project: arrrlo/lsbranch
        def search_dir(path, recursive):
            for dirname, dirnames, filenames in os.walk(path):

                for subdirname in dirnames:
                    git_dir = os.path.join(dirname, subdirname, '.git')
                    full_path = click.format_filename(os.path.abspath(
                        os.path.join(dirname, subdirname)))

                    self._count_dirs += 1

                    if os.path.exists(git_dir):
                        branch = self._git_branch(git_dir)

                        if not branch:
                            continue

                        self._echo_branch(full_path, branch)

                        self._save(full_path, branch)
                    else:
                        self._echo_dir(full_path)

                        if recursive and subdirname[0] != '.':
                            search_dir(os.path.join(dirname, subdirname),
                                       recursive)
                break
Example #42
def render_template(path, context):
    try:
        return make_template(path).render(context)
    except jinja2.TemplateError as e:
        # TODO: Only show this on verbose mode
        traceback.print_exc()
        error(six.text_type(e) + u' in template ' + click.format_filename(path))
Example #43
def archive(skip_empty, skip_hidden, compress, startpath, dstnfile, extensions):
    click.echo(click.format_filename(dstnfile))
    if os.path.exists(dstnfile):
        click.echo('Error: Cannot overwrite existing destination file {}.'
                   .format(dstnfile))
        return False
    return do_tar(startpath, dstnfile, compress, skip_empty, skip_hidden, extensions)
Example #44
def diff(filename, first_page, second_page):
    from .parsing import Book

    click.secho('Reading: ' + click.format_filename(filename))

    book = Book(filename)

    if second_page is None:
        second_page = first_page + 1

    if first_page >= book.npages or second_page >= book.npages:
        raise click.ClickException(
            'Page out of bounds (0, {})'.format(book.npages - 1))

    old_page, new_page = book.page(first_page), book.page(second_page)

    click.echo('Page {}: {} entries'.format(first_page, len(old_page.entries)))
    click.echo('Page {}: {} entries'.format(second_page, len(new_page.entries)))

    added, removed, unchanged = old_page.diff(new_page)

    click.echo('\nAdded: {}'.format(added))
    for id_ in added:
        click.secho('{}\t{}'.format(id_, new_page.entries_by_id[id_][0]['title']), fg='green')

    click.echo('\nRemoved: {}'.format(removed))
    for id_ in removed:
        click.secho('{}\t{}'.format(id_, old_page.entries_by_id[id_][0]['title']), fg='red')

    click.echo('\nUnchanged: {}'.format(unchanged))
    for id_ in unchanged:
        click.secho('{}\t{}\t|\t{}'.format(
            id_, old_page.entries_by_id[id_][0]['title'], new_page.entries_by_id[id_][0]['title']))
Example #45
def proselint(files=None, version=None, initialize=None,
              debug=None, score=None, json=None, time=None, demo=None):
    """Define the linter command line API."""
    if time:
        click.echo(timing_test())
        return

    # Run the initialization.
    if initialize:
        run_initialization()
        return

    if score:
        click.echo(lintscore())
        return

    # In debug mode, delete the cache and *.pyc files before running.
    if debug:
        clear_cache()

    # Use the demo file by default.
    if demo:
        files = [click.open_file(demo_file, encoding='utf8')]

    for f in files:
        errors = lint(f, debug=debug)
        show_errors(click.format_filename(f.name), errors, json)
Example #46
File: cli.py Project: timofurrer/twtxt
def quickstart():
    """Quickstart wizard for setting up twtxt."""
    width = click.get_terminal_size()[0]
    width = width if width <= 79 else 79

    click.secho("twtxt - quickstart", fg="cyan")
    click.secho("==================", fg="cyan")
    click.echo()

    help_text = "This wizard will generate a basic configuration file for twtxt with all mandatory options set. " \
                "Have a look at the README.rst to get information about the other available options and their meaning."
    click.echo(textwrap.fill(help_text, width))

    click.echo()
    nick = click.prompt("➤ Please enter your desired nick", default=os.environ.get("USER", ""))
    twtfile = click.prompt("➤ Please enter the desired location for your twtxt file", "~/twtxt.txt", type=click.Path())
    disclose_identity = click.confirm("➤ Do you want to disclose your identity? Your nick and URL will be shared", default=False)

    click.echo()
    add_news = click.confirm("➤ Do you want to follow the twtxt news feed?", default=True)

    conf = Config.create_config(nick, twtfile, disclose_identity, add_news)
    open(os.path.expanduser(twtfile), "a").close()

    click.echo()
    click.echo("✓ Created config file at '{0}'.".format(click.format_filename(conf.config_file)))
Example #47
File: cli.py Project: oryband/open-audit
def paragraphs_by_attr(document, attr_func, verbose):
    """return an attribute from all paragraphs according to given function.

    the attribute function is used for fetching the attribute from every
    paragraph.
    """
    if verbose:
        click.echo('opening %s' % click.format_filename(document), err=True)

    doc = Document(document)
    attrs = set()

    l = len(doc.paragraphs)
    if verbose:
        click.echo('total of %d paragraphs, starting' % l, err=True)
    for i, p in enumerate(doc.paragraphs, start=1):
        attrs |= attr_func(p)

        if verbose and i % 1000 == 0:
            click.echo('processed %d paragraphs' % i, err=True)

    if verbose:
        click.echo('processing complete, total %d paragraphs' % i,
                   err=True)

    return attrs
Example #48
File: unp.py Project: lowks/unp
def cli(files, silent, output, dump_command, forced_unpacker):
    """unp is a super simple command line application that can unpack a lot
    of different archives.  No matter if you unpack a zip or tarball, the
    syntax for doing it is the same.  Unp will also automatically ensure
    that the unpacking goes into a single folder in case the archive does not
    contain a wrapper directory.  This guarantees that you never accidentally
    spam files into your current working directory.

    Behind the scenes unp will shell out to the most appropriate application
    based on filename or guessed mimetype.
    """
    if output is None:
        output = '.'

    unpackers = []

    for filename in files:
        filename = os.path.realpath(filename)
        if not os.path.isfile(filename):
            raise click.UsageError('Could not find file "%s".' %
                                   click.format_filename(filename))
        if forced_unpacker is not None:
            unpacker_cls = forced_unpacker
        else:
            unpacker_cls = get_unpacker_class(filename)
        unpackers.append(unpacker_cls(filename, silent=silent))

    for unpacker in unpackers:
        if dump_command:
            unpacker.dump_command(output)
        else:
            unpacker.unpack(output)
Example #49
def cli(debug, logfile, quiet):
    """Route53 Manager.

    This tool assists in managing Route53 zones. There are 2 basic modes of operations:\n
     * import - imports zone content from a file, a zone XFER, or Route53. Output is
     written to to stdout or a YAML file if filename is specified.\n
     * apply - applies the content of a specified YAML file to a Route53 zone.
    """
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # quiet overrides other options
    if quiet:
        debug = False
    if logfile:
        log.info('logging to {}'.format(click.format_filename(logfile)))
        fh = logging.FileHandler(logfile)
        fh.setFormatter(formatter)
        log.addHandler(fh)
    if sys.stdout.isatty() and not quiet:
        log.info('Detected console - copying logs to stdout')
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        log.addHandler(ch)
    if debug:
        log.setLevel(logging.DEBUG)
        log.debug('Debug mode: on')
Example #50
File: settings.py Project: do3cc/sentry
def configure(ctx, py, yaml, skip_backend_validation=False):
    """
    Given the two different config files, set up the environment.

    NOTE: Will only execute once, so it's safe to call multiple times.
    """
    global __installed
    if __installed:
        return

    # Make sure that our warnings are always displayed
    import warnings
    warnings.filterwarnings('default', '', Warning, r'^sentry')

    from .importer import install

    if yaml is None:
        # `yaml` will be None when SENTRY_CONF is pointed
        # directly to a file, in which case, this file must exist
        if not os.path.exists(py):
            if ctx:
                raise click.ClickException("Configuration file does not exist. Use 'sentry init' to initialize the file.")
            raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(py))
    elif not os.path.exists(yaml) and not os.path.exists(py):
        if ctx:
            raise click.ClickException("Configuration file does not exist. Use 'sentry init' to initialize the file.")
        raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))

    os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'

    install('sentry_config', py, DEFAULT_SETTINGS_MODULE)

    # HACK: we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')

    from .initializer import initialize_app, on_configure
    initialize_app({
        'config_path': py,
        'settings': settings,
        'options': yaml,
    }, skip_backend_validation=skip_backend_validation)
    on_configure({'settings': settings})

    __installed = True
Example #51
0
def cli(odoo_config, odoo_version, version):
    if version:
        click.echo(__version__)
        exit()
    if not odoo_config:
        click.echo("Provide a valid Odoo's config file")
        exit()
    update(click.format_filename(odoo_config),
           odoo_version)
Example #52
0
def tree(ramlfile, color, output, verbose, validate, config):
    """Pretty-print a tree of the RAML-defined API."""
    try:
        load_obj = load_file(ramlfile)
        ttree(load_obj, color, output, verbose, validate, config)
    except InvalidRAMLError as e:
        msg = '"{0}" is not a valid RAML file: {1}'.format(click.format_filename(ramlfile), e)
        click.secho(msg, fg="red", err=True)
        raise SystemExit(1)
Example #53
0
File: cli.py Project: spudfkc/draftpy
def run(dkfile, strategy, lineup_gen, env):
    if env:
        with open(env, "r") as f:
            env_data = f.read()
        for line in env_data.split("\n"):
            if not line.strip():
                continue
            key, value = line.split("=", 1)
            os.environ[key] = value

    filepath = click.format_filename(dkfile)
    reader = DKReader(filepath)
    strat = strat2class.get(strategy.lower())
    if not strat:
        click.echo("Strategy {} not found!".format(strategy))
        return 1
    all_picks = []
    strategy = strat()
    for i in reader.games():
        all_picks += strategy.go(i)

    # Set names on all picks, since we only have IDs at this point
    for pick in all_picks:
        # Look up the display name via the NBA player summary endpoint;
        # caching these lookups would avoid one request per pick.
        pick.name = nbaplayer.PlayerSummary(pick.player_id).json['resultSets'][1]['rowSet'][0][1]

    # Build a dict keyed by player name for O(1) lookup of position/salary.
    name2attr = {}
    for p in reader.players():
        name2attr[p[0]] = (p[1], p[2])

    # Set salaries and positions on all picks
    for pick in all_picks:
        pick.position = name2attr[pick.name][0]
        pick.salary = name2attr[pick.name][1]

    for pick in all_picks:
        click.echo(pick)

    if not lineup_gen:
        return

    generator = GALineup(all_picks)
    pop, stats, hof = generator.main()
    hof = [i for i in hof if len(i) == 8]
    for lineup in hof:
        points = sum([all_picks[i].est_points for i in lineup])
        salary = sum([all_picks[i].salary for i in lineup])
        if points < draftpy.galineup.MIN_POINTS:
            continue
        if salary > draftpy.galineup.MAX_SALARY:
            continue
        print "================================="
        for i in lineup:
            print all_picks[i]
        print "    >points: {}".format(points)
        print "    >salary: {}".format(salary)
    print ">>END HOF<<"
Example #54
0
def report(logger: log.CliLogger, filepath: str) -> None:
    """Show lyrics presence in given file"""
    audio = misc.get_audio(filepath)
    data = misc.get_tags(audio)
    if not data or 'lyrics' not in data:
        logger.log_not_found(filepath)
        click.secho("no lyrics:    ", nl=False, fg="red")
    else:
        click.secho("lyrics found: ", nl=False, fg="green")
    click.echo("%s" % click.format_filename(filepath))
Example #55
0
def main(source, output, label_index, no_label, skip_headers):
    click.echo(click.format_filename(source))
    click.echo(click.format_filename(output))
    input_file = source
    output_file = output

    with open(input_file, 'r') as fin:
        with open(output_file, 'w') as fout:
            reader = csv.reader(fin)
            if skip_headers:
                next(reader)
            for line in reader:
                if no_label:
                    label = '1'
                else:
                    label = line.pop(label_index)

                new_line = construct_line(label, line)
                fout.write(new_line)
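construct_line is not shown in the source. Given the label/feature split above, the script appears to emit one 'label | features' line per CSV row, in the style of Vowpal Wabbit input; a hedged sketch of such a helper (names and namespace are illustrative) could be:

def construct_line(label, row):
    """Sketch: build a 'label |f idx:value ...' line, skipping empty/zero fields."""
    features = []
    for idx, value in enumerate(row):
        if value and value != '0':
            features.append('%d:%s' % (idx, value))
    return '%s |f %s\n' % (label, ' '.join(features))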
Example #56
0
def cli(input, output, fix_persian, shift):
    '''
        SubFixer does a bit of string manipulation and datetime math:
        it can shift your subtitles so they match your film, and it can
        decode the text to Unicode to fix problems with Persian subtitles.
    '''
    if input[-4:] not in ('.srt', '.SRT'):
        click.echo('%s is not a srt file.' % click.format_filename(input))
        exit()
    if not fix_persian and not shift:
        raise click.BadParameter('''Enter an option --fix_persian or --shift \
                                \nUse subfixer --help for more info''')

    if fix_persian:
        with open(input, 'r') as f:
            lines = f.read()

        sub_title_fixer = SubtitleFixer()

        lines = sub_title_fixer.decode_string(lines)

        write_file_name = input[:-4] + '_fixed.srt'

        if output:
            write_file_name = output
        with open(write_file_name, 'wb') as f:
            f.write(lines.encode('utf-8'))

        click.echo('Persian text fixed for %s' % input)
        click.echo('New subtitle is at: %s' % write_file_name)

    if shift:
        new_lines = []
        with open(input, 'r') as f:
            for line in f.readlines():
                line = line[:-2]  # removes '\r\n' from line
                if is_time_string(line):
                    times = line.split(' --> ')  # split up the two times
                    new_times = []
                    for t in times:
                        new_times.append(process_time_string(t, shift))
                    line = new_times[0] + ' --> ' + new_times[1]
                new_lines.append(line + '\r\n')  # adds back in '\r\n'

        write_file_name = input[:-4] + '_fixed.srt'

        if output:
            write_file_name = output
        with open(write_file_name, 'w') as f:
            for line in new_lines:
                f.write(line)

        click.echo('Time shift done for %s' % input)
        click.echo('New subtitle is at: %s' % write_file_name)
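is_time_string and process_time_string are not shown either. SRT timing lines look like '00:01:02,345 --> 00:01:05,000', so hedged sketches of the two helpers (assuming the shift is given in seconds) might be:

from datetime import datetime, timedelta

def is_time_string(line):
    """Sketch: treat any line containing the SRT arrow as a timing line."""
    return ' --> ' in line

def process_time_string(time_string, shift):
    """Sketch: shift an 'HH:MM:SS,mmm' timestamp by `shift` seconds."""
    t = datetime.strptime(time_string, '%H:%M:%S,%f')
    t += timedelta(seconds=float(shift))
    return t.strftime('%H:%M:%S,') + '%03d' % (t.microsecond // 1000)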
Example #57
0
File: clean.py Project: mailund/gwf
def clean(obj, targets, all):
    """Clean output files of targets.

    By default, only targets that are not endpoints will have their output files
    deleted. If you want to clean up output files from endpoints too, use the
    ``--all`` flag.
    """
    graph = graph_from_config(obj)

    filters = []
    if targets:
        filters.append(NameFilter(patterns=targets))
    if not all:
        filters.append(EndpointFilter(endpoints=graph.endpoints(), mode="exclude"))

    matches = list(filter_generic(targets=graph, filters=filters))

    total_size = sum(
        os.path.getsize(path)
        if os.path.exists(path) and path not in target.protected else 0
        for target in matches
        for path in target.outputs
    )

    logger.info("Will delete %s of files!", _format_size(total_size))

    for target in matches:
        logger.info("Deleting output files of %s", target.name)
        for path in target.outputs:
            if path in target.protected:
                logger.debug(
                    'Skipping file "%s" from target "%s" because it is protected',
                    click.format_filename(path),
                    target.name,
                )
                continue

            logger.info(
                'Deleting file "%s" from target "%s"',
                click.format_filename(path),
                target.name,
            )
            _delete_file(path)
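_format_size and _delete_file are small helpers defined elsewhere in gwf and not shown here; hedged stand-ins with the behaviour the command relies on (human-readable sizes, deletion that tolerates already-missing files) could be:

import os

def _format_size(num_bytes):
    """Sketch: render a byte count as a human-readable string."""
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if num_bytes < 1024 or unit == 'TB':
            return '%.1f %s' % (num_bytes, unit)
        num_bytes /= 1024.0

def _delete_file(path):
    """Sketch: delete a file, ignoring paths that no longer exist."""
    try:
        os.remove(path)
    except OSError:
        pass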
Example #58
0
def packaging(output_path, model):
    '''Return a path with all directories added to form the
    user-defined package structure.'''

    path = output_path
    if model.package is not None:
        packages = model.package.name.split('.')
        for package in packages[:-1]:
            path = os.path.join(path, package)

    return click.format_filename(path)
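A short illustration of the helper above, using SimpleNamespace stand-ins for the generated model (the package name here is hypothetical): for 'com.example.app' every component except the last becomes a directory.

from types import SimpleNamespace

# Hypothetical stand-ins for the code-generation model objects.
model = SimpleNamespace(package=SimpleNamespace(name='com.example.app'))

# Walks every component except the last, so this prints
# the equivalent of os.path.join('build', 'com', 'example').
print(packaging('build', model))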
Example #59
0
    def run(self, directory):
        if directory and not os.path.exists(directory):
            os.makedirs(directory)

        config_filename = os.path.join(directory, self.config_filename)

        if os.path.isfile(config_filename):
            click.confirm("File already exists at '%s', overwrite?" % click.format_filename(config_filename),
                          abort=True)

        with click.open_file(config_filename, 'wb') as fp:
            fp.write(self.config_contents)
Example #60
0
def list_cmd(filename, page=None):
    from .parsing import Book

    click.echo('Reading: ' + click.format_filename(filename))

    book = Book(filename)

    for page in book.pages:
        click.echo('\nParsing page n: {} name: {}  elements: {}\n'
                   .format(page.n, page.name, len(page.entries)))
        for data in page.entries:
            click.echo('{}\t{}\t{}'.format(data['pos'], data['book_id'], data))