예제 #1
0
def terminal(port):
    """Open an interactive serial terminal on ``port``.

    ``port`` may be a device name or ``'default'``, in which case the port is
    auto-selected when exactly one is present, prompted for when several are
    present, and the command aborts when none are found.
    """
    click.echo(click.style('NOTE: This is an early prototype of the terminal.'
                           ' Nothing is guaranteed to work.', bold=True))
    if port == 'default':
        # Enumerate COM ports once instead of re-querying for every check
        # (the original called list_com_ports() up to four times).
        com_ports = prosflasher.ports.list_com_ports()
        if len(com_ports) == 1:
            port = com_ports[0].device
        elif len(com_ports) > 1:
            click.echo('Multiple ports were found:')
            click.echo(prosflasher.ports.create_port_list())
            port = click.prompt('Select a port to open',
                                type=click.Choice([p.device for p in com_ports]))
        else:
            click.echo('No ports were found.')
            click.get_current_context().abort()
            sys.exit()
    ser = prosflasher.ports.create_serial(port)
    term = proscli.serial_terminal.Terminal(ser)
    # Let Ctrl+C stop the terminal thread cleanly instead of killing the CLI.
    signal.signal(signal.SIGINT, term.stop)
    term.start()
    # Poll until the terminal thread signals it has stopped.
    while term.alive:
        time.sleep(0.005)
    term.join()
    ser.close()
    print('Exited successfully')
    sys.exit(0)
예제 #2
0
def resolve_id_or_name(client, bookmark_id_or_name):
    """Resolve a bookmark given either its UUID or its (case-sensitive) name."""
    # leading/trailing whitespace doesn't make sense for UUIDs and the Transfer
    # service outright forbids it for bookmark names, so we can strip it off
    bookmark_id_or_name = bookmark_id_or_name.strip()

    res = None
    try:
        UUID(bookmark_id_or_name)  # raises ValueError if argument not a UUID
        looks_like_uuid = True
    except ValueError:
        looks_like_uuid = False

    if looks_like_uuid:
        try:
            res = client.get_bookmark(bookmark_id_or_name.lower())
        except TransferAPIError as exception:
            if exception.code != "BookmarkNotFound":
                raise

    if not res:  # non-UUID input or UUID not found; fallback to match by name
        # n.b. case matters to the Transfer service for bookmark names, so
        # two bookmarks can exist whose names vary only by their case
        by_name = (
            bookmark_row
            for bookmark_row in client.bookmark_list()
            if bookmark_row["name"] == bookmark_id_or_name
        )
        res = next(by_name, None)
        if res is None:
            safeprint(
                u'No bookmark found for "{}"'.format(bookmark_id_or_name),
                write_to_stderr=True,
            )
            click.get_current_context().exit(1)

    return res
예제 #3
0
def upgrade(cfg, kernel, location, depot):
    """Upgrade the project at ``location`` to the requested kernel template.

    :param kernel: specific version string, or ``'latest'`` to pick the
        highest downloaded semantic version
    :param depot: depot name, or ``'auto'`` to prefer pros-mainline when it
        provides the chosen version
    """
    first_run(cfg)
    templates = local.get_local_templates(pros_cfg=cfg.pros_cfg,
                                          template_types=[TemplateTypes.kernel])  # type: List[Identifier]
    if not templates:
        click.echo('No templates have been downloaded! Use `pros conduct download` to download the latest kernel.')
        click.get_current_context().abort()
        sys.exit()
    kernel_version = kernel
    if kernel == 'latest':
        # Highest semantic version among the downloaded templates.
        kernel_version = sorted(templates, key=lambda t: semver.Version(t.version))[-1].version
        proscli.utils.debug('Resolved version {} to {}'.format(kernel, kernel_version))
    # Filter by version once (the original repeated this filter inside the
    # 'auto' branch).
    templates = [t for t in templates if t.version == kernel_version]
    depot_registrar = depot
    if depot == 'auto':
        if not templates:
            click.echo('No templates exist for {}'.format(kernel_version))
            click.get_current_context().abort()
            sys.exit()
        # Prefer the official depot when it provides this version.
        if 'pros-mainline' in [t.depot for t in templates]:
            depot_registrar = 'pros-mainline'
        else:
            depot_registrar = templates[0].depot
        proscli.utils.debug('Resolved depot {} to {}'.format(depot, depot_registrar))
    templates = [t for t in templates if t.depot == depot_registrar]
    if not templates:
        click.echo('No templates were found for kernel version {} on {}'.format(kernel_version, depot_registrar))
        # BUG FIX: previously fell through to templates[0] and raised an
        # IndexError; abort like the other empty-result branches do.
        click.get_current_context().abort()
        sys.exit()
    template = templates[0]
    if not os.path.isabs(location):
        location = os.path.abspath(location)
    click.echo('Upgrading existing project from {} on {} at {}'.format(template.version, template.depot, location))
    local.upgrade_project(identifier=template, dest=location, pros_cli=cfg.pros_cfg)
예제 #4
0
파일: commands.py 프로젝트: jakereps/q2cli
    def handle_in_params(self, kwargs):
        """Collect input/parameter values from the click kwargs.

        Returns ``(arguments, missing, verbose, quiet)`` where ``missing``
        lists the required options for which no value could be found.
        """
        import q2cli.handlers

        arguments = {}
        missing = []
        cmd_fallback = self.cmd_config_handler.get_value(kwargs)

        verbose = self.verbose_handler.get_value(kwargs, fallback=cmd_fallback)
        quiet = self.quiet_handler.get_value(kwargs, fallback=cmd_fallback)

        if verbose and quiet:
            click.secho('Unsure of how to be quiet and verbose at the '
                        'same time.', fg='red', bold=True, err=True)
            click.get_current_context().exit(1)

        metadata_types = (q2cli.handlers.MetadataHandler,
                          q2cli.handlers.MetadataColumnHandler)
        for item in self.action['signature']:
            if item['type'] not in ('input', 'parameter'):
                continue
            name = item['name']
            handler = self.generated_handlers[name]
            try:
                if isinstance(handler, metadata_types):
                    # Metadata handlers additionally receive the verbosity
                    # flag.
                    arguments[name] = handler.get_value(
                        verbose, kwargs, fallback=cmd_fallback)
                else:
                    arguments[name] = handler.get_value(
                        kwargs, fallback=cmd_fallback)
            except q2cli.handlers.ValueNotFoundException:
                missing += handler.missing

        return arguments, missing, verbose, quiet
예제 #5
0
def autoactivate(client, endpoint_id, if_expires_in=None):
    """
    Attempts to auto-activate the given endpoint with the given client
    If auto-activation fails, parses the returned activation requirements
    to determine which methods of activation are supported, then tells
    the user to use 'globus endpoint activate' with the correct options(s)
    """
    kwargs = {} if if_expires_in is None else {"if_expires_in": if_expires_in}

    res = client.endpoint_autoactivate(endpoint_id, **kwargs)
    if res["code"] != "AutoActivationFailed":
        return res

    message = (
        "The endpoint could not be auto-activated and must be "
        "activated before it can be used.\n\n"
        + activation_requirements_help_text(res, endpoint_id)
    )

    safeprint(message, write_to_stderr=True)
    click.get_current_context().exit(1)
예제 #6
0
def create_project(identifier, dest, pros_cli=None):
    """Instantiate a new project at ``dest`` from the template ``identifier``."""
    if not pros_cli:
        pros_cli = CliConfig()
    filename = os.path.join(pros_cli.directory, identifier.depot,
                            '{}-{}'.format(identifier.name, identifier.version),
                            'template.pros')
    if not os.path.isfile(filename):
        click.echo('Error: template.pros not found for {}-{}'.format(identifier.name, identifier.version))
        click.get_current_context().abort()
        sys.exit()
    dest_occupied = os.path.isfile(dest) or \
        (os.path.isdir(dest) and len(os.listdir(dest)) > 0)
    if dest_occupied:
        click.echo('Error! Destination is a file or a nonempty directory! Delete the file(s) and try again.')
        click.get_current_context().abort()
        sys.exit()
    config = TemplateConfig(file=filename)
    distutils.dir_util.copy_tree(config.directory, dest)
    # Strip out entries the template marks as ignorable.
    for root, dirs, files in os.walk(dest):
        for entry in dirs:
            if any(fnmatch.fnmatch(entry, pattern) for pattern in config.template_ignore):
                verbose('Removing {}'.format(entry))
                os.rmdir(os.path.join(root, entry))
        for entry in files:
            if any(fnmatch.fnmatch(entry, pattern) for pattern in config.template_ignore):
                verbose('Removing {}'.format(entry))
                os.remove(os.path.join(root, entry))
    proj_config = prosconfig.ProjectConfig(dest, create=True)
    proj_config.kernel = identifier.version
    proj_config.save()
예제 #7
0
    def check_completed():
        completed = client.task_wait(
            task_id, timeout=polling_interval, polling_interval=polling_interval
        )
        if completed:
            if heartbeat:
                safeprint("", write_to_stderr=True)
            # meowing tasks wake up!
            if meow:
                safeprint(
                    r"""
                  _..
  /}_{\           /.-'
 ( a a )-.___...-'/
 ==._.==         ;
      \ i _..._ /,
      {_;/   {_//""",
                    write_to_stderr=True,
                )

            # TODO: possibly update TransferClient.task_wait so that we don't
            # need to do an extra fetch to get the task status after completion
            res = client.get_task(task_id)
            formatted_print(res, text_format=FORMAT_SILENT)

            status = res["status"]
            if status == "SUCCEEDED":
                click.get_current_context().exit(0)
            else:
                click.get_current_context().exit(1)

        return completed
예제 #8
0
def upgrade_project(identifier, dest, pros_cli=None):
    """Copy a template's upgrade paths over an existing project.

    :param identifier: template identifier (name/version/depot) to upgrade to
    :param dest: directory of the project being upgraded
    :param pros_cli: CliConfig to use; a fresh default one is created if falsy
    """
    if pros_cli is None or not pros_cli:
        pros_cli = CliConfig()
    filename = os.path.join(pros_cli.directory, identifier.depot,
                            '{}-{}'.format(identifier.name, identifier.version),
                            'template.pros')

    if not os.path.isfile(filename):
        click.echo('Error: template.pros not found for {}-{}'.format(identifier.name, identifier.version))
        click.get_current_context().abort()
        sys.exit()
    # raise_on_error=True: dest must already be a valid project
    proj_config = prosconfig.ProjectConfig(dest, raise_on_error=True)
    config = TemplateConfig(file=filename)

    # Walk the template and copy every entry whose name matches an upgrade
    # path into the project, preserving its path relative to the template root.
    for root, dirs, files in os.walk(config.directory):
        for d in dirs:
            if any([fnmatch.fnmatch(d, p) for p in config.upgrade_paths]):
                verbose('Upgrading {}'.format(d))
                relpath = os.path.relpath(os.path.join(root, d), config.directory)
                # NOTE(review): shutil.copytree raises if the destination
                # directory already exists -- presumably upgrade paths name
                # directories that are replaced wholesale; confirm
                shutil.copytree(os.path.join(config.directory, relpath), os.path.join(proj_config.directory, relpath))
        for f in files:
            if any([fnmatch.fnmatch(f, p) for p in config.upgrade_paths]):
                verbose('Upgrading {}'.format(f))
                relpath = os.path.relpath(os.path.join(root, f), config.directory)
                shutil.copyfile(os.path.join(config.directory, relpath), os.path.join(proj_config.directory, relpath))
예제 #9
0
파일: util.py 프로젝트: gregcaporaso/q2cli
def exit_with_error(e, header='An error has been encountered:', file=None,
                    suppress_footer=False):
    """Print a traceback plus a red error banner, then exit with status 1.

    ``file`` receives the traceback (stderr when None); the footer tells the
    user where that debug info went.
    """
    import sys
    import traceback
    import textwrap
    import click

    if file is None:
        file = sys.stderr
        footer = 'See above for debug info.'
    else:
        footer = 'Debug info has been saved to %s' % file.name

    segments = [header, textwrap.indent(str(e), '  ')]
    if not suppress_footer:
        segments.append(footer)

    traceback.print_exception(type(e), e, e.__traceback__, file=file)
    file.write('\n')

    click.secho('\n\n'.join(segments), fg='red', bold=True, err=True)

    click.get_current_context().exit(1)
예제 #10
0
파일: util.py 프로젝트: qiime2/q2cli
def exit_with_error(e, header='An error has been encountered:',
                    traceback='stderr', status=1):
    """Print the exception (optionally with its traceback) in red and exit.

    ``traceback`` may be 'stderr', an open file object, or None to suppress
    the traceback entirely.
    """
    import sys
    import traceback as tb
    import textwrap
    import click

    if traceback == 'stderr':
        tb_file = sys.stderr
        footer = ['See above for debug info.']
    elif traceback is not None:
        tb_file = traceback
        footer = ['Debug info has been saved to %s' % tb_file.name]
    else:
        # footer only exists if traceback is set
        tb_file = None
        footer = []

    segments = [header, textwrap.indent(str(e), '  ')] + footer

    if traceback is not None:
        tb.print_exception(type(e), e, e.__traceback__, file=tb_file)
        tb_file.write('\n')

    click.secho('\n\n'.join(segments), fg='red', bold=True, err=True)

    if not footer:
        click.echo(err=True)  # extra newline to look normal

    click.get_current_context().exit(status)
예제 #11
0
파일: whoami.py 프로젝트: globus/globus-cli
def whoami_command(linked_identities):
    """
    Executor for `globus whoami`
    """
    client = get_auth_client()

    # get userinfo from auth.
    # if we get back an error the user likely needs to log in again
    try:
        res = client.oauth2_userinfo()
    except AuthAPIError:
        safeprint(
            "Unable to get user information. Please try " "logging in again.",
            write_to_stderr=True,
        )
        click.get_current_context().exit(1)

    print_command_hint(
        "For information on which identities are in session see\n"
        "  globus session show\n"
    )

    # Default output is the top level data
    if not linked_identities:
        formatted_print(
            res,
            text_format=FORMAT_TEXT_RECORD,
            fields=[
                ("Username", "preferred_username"),
                ("Name", "name"),
                ("ID", "sub"),
                ("Email", "email"),
            ],
            simple_text=(None if is_verbose() else res["preferred_username"]),
        )
        return

    # --linked-identities either displays all usernames or a table if verbose
    try:
        identity_set = res["identity_set"]
        formatted_print(
            identity_set,
            fields=[
                ("Username", "username"),
                ("Name", "name"),
                ("ID", "sub"),
                ("Email", "email"),
            ],
            simple_text=(
                None
                if is_verbose()
                else "\n".join(x["username"] for x in identity_set)
            ),
        )
    except KeyError:
        safeprint(
            "Your current login does not have the consents required "
            "to view your full identity set. Please log in again "
            "to agree to the required consents.",
            write_to_stderr=True,
        )
예제 #12
0
파일: tools.py 프로젝트: qiime2/q2cli
def export_data(input_path, output_path, output_format):
    """Export a QIIME 2 Result's data to ``output_path``.

    When ``output_format`` is given, the Artifact is first transformed to
    that format (not supported for Visualizations); otherwise the Result's
    native format is exported as-is.
    """
    import qiime2.util
    import qiime2.sdk
    # BUG FIX: a bare `import distutils` does not make the dir_util submodule
    # available; it must be imported explicitly.
    import distutils.dir_util
    result = qiime2.sdk.Result.load(input_path)
    if output_format is None:
        if isinstance(result, qiime2.sdk.Artifact):
            output_format = result.format.__name__
        else:
            output_format = 'Visualization'
        result.export_data(output_path)
    else:
        if isinstance(result, qiime2.sdk.Visualization):
            error = '--output-format cannot be used with visualizations'
            click.secho(error, fg='red', bold=True, err=True)
            click.get_current_context().exit(1)
        else:
            source = result.view(qiime2.sdk.parse_format(output_format))
            if os.path.isfile(str(source)):
                if os.path.isfile(output_path):
                    os.remove(output_path)
                else:
                    # create parent directory (recursively) if it doesn't
                    # exist yet. BUG FIX: dirname is '' for a bare filename
                    # in the cwd, and os.makedirs('') raises -- guard it.
                    parent = os.path.dirname(output_path)
                    if parent:
                        os.makedirs(parent, exist_ok=True)
                qiime2.util.duplicate(str(source), output_path)
            else:
                distutils.dir_util.copy_tree(str(source), output_path)

    output_type = 'file' if os.path.isfile(output_path) else 'directory'
    success = 'Exported %s as %s to %s %s' % (input_path, output_format,
                                              output_type, output_path)
    click.secho(success, fg='green')
예제 #13
0
def config_depot(cfg, name):
    """Interactively reconfigure the registrar options of a registered depot."""
    depots = utils.get_depot_configs(cfg.pros_cfg)
    if name not in [d.name for d in depots]:
        # BUG FIX: the message had a '{}' placeholder but no .format(name),
        # so the literal braces were printed to the user.
        click.echo('{} isn\'t a registered depot! Have you added it using `pros conduct add-depot`?'.format(name))
        click.get_current_context().abort()
        sys.exit()
    depot = [d for d in depots if d.name == name][0]
    depot.registrar_options = utils.get_all_provider_types(cfg.pros_cfg)[depot.registrar](None) \
        .configure_registrar_options(default=depot.registrar_options)
    depot.save()
예제 #14
0
def cli_main(pid, include_greenlet, debugger, verbose):
    '''Print stack of python process.

    $ pystack <pid>
    '''
    try:
        print_stack(pid, include_greenlet, debugger, verbose)
    except DebuggerNotFound as exc:
        # Surface the failure on stderr and exit non-zero.
        click.echo('DebuggerNotFound: {}'.format(exc.args[0]), err=True)
        click.get_current_context().exit(1)
예제 #15
0
 def callback(ctx, param, value):
     """Emit the completion script for the chosen shell and exit the CLI."""
     if not value or ctx.resilient_parsing:
         return
     completers = {"BASH": bash_shell_completer, "ZSH": zsh_shell_completer}
     if value not in completers:
         raise ValueError("Unsupported shell completion")
     safeprint(completers[value])
     click.get_current_context().exit(0)
예제 #16
0
    def fail(deadline=None):
        """Report that the endpoint is not activated (or expires soon) and exit."""
        if deadline is None:
            exp_string = ""
        else:
            exp_string = " or will expire within {} seconds".format(deadline)

        message = "The endpoint is not activated{}.\n\n".format(
            exp_string
        ) + activation_requirements_help_text(res, endpoint_id)
        formatted_print(res, simple_text=message)
        click.get_current_context().exit(1)
예제 #17
0
def make_output_type(index, config):
    """Create the derived output DatasetType described by ``config``.

    Returns ``(source_type, output_type)``; exits the CLI with status 1 when
    the configured source type does not exist in the index.
    """
    source_type = index.products.get_by_name(config['source_type'])
    if not source_type:
        # BUG FIX: click.echo takes a single message string (its second
        # positional argument is a file object), not logging-style '%s' args.
        click.echo("Source DatasetType %s does not exist" % config['source_type'])
        click.get_current_context().exit(1)

    output_type = morph_dataset_type(source_type, config)
    _LOG.info('Created DatasetType %s', output_type.name)
    output_type = index.products.add(output_type)

    return source_type, output_type
예제 #18
0
    def __call__(self, **kwargs):
        """Called when user hits return, **kwargs are Dict[click_names, Obj]"""
        import importlib
        import itertools
        import os
        import qiime2.util

        # Gather inputs/parameters and output paths from the click kwargs;
        # each handler reports any required options it could not resolve.
        arguments, missing_in, verbose = self.handle_in_params(kwargs)
        outputs, missing_out = self.handle_out_params(kwargs)

        if missing_in or missing_out:
            # A new context is generated for a callback, which will result in
            # the ctx.command_path duplicating the action, so just use the
            # parent so we can print the help *within* a callback.
            ctx = click.get_current_context().parent
            click.echo(ctx.get_help()+"\n", err=True)
            for option in itertools.chain(missing_in, missing_out):
                click.secho("Error: Missing option: --%s" % option, err=True,
                            fg='red', bold=True)
            if missing_out:
                click.echo(_OUTPUT_OPTION_ERR_MSG, err=True)
            ctx.exit(1)

        # Resolve the plugin action dynamically:
        # qiime2.plugins.<plugin id>.actions.<action id>
        module_path = 'qiime2.plugins.%s.actions' % self.plugin['id']
        actions_module = importlib.import_module(module_path)
        action = getattr(actions_module, self.action['id'])

        # Discard the action's stdout/stderr unless --verbose was given.
        stdout = os.devnull
        stderr = os.devnull
        if verbose:
            # `qiime2.util.redirected_stdio` defaults to stdout/stderr when
            # supplied `None`.
            stdout = None
            stderr = None

        try:
            with qiime2.util.redirected_stdio(stdout=stdout,
                                              stderr=stderr):
                results = action(**arguments)
        except Exception as e:
            # Show the traceback only in verbose mode; exit(1) either way.
            if verbose:
                import traceback
                import sys
                traceback.print_exc(file=sys.stderr)
                click.echo(err=True)
                self._echo_plugin_error(e, 'See above for debug info.')
            else:
                self._echo_plugin_error(
                    e, 'Re-run with --verbose to see debug info.')
            click.get_current_context().exit(1)

        # Persist each result to its requested output path.
        for result, output in zip(results, outputs):
            click.secho('Saved %s to: %s' % (result.type,
                                             result.save(output)), fg='green')
예제 #19
0
def test_global_context_object(runner):
    """The current context is globally visible only while a command runs."""
    @click.command()
    @click.pass_context
    def cmd(ctx):
        # Inside a running command the global accessor returns the live
        # context and reflects mutations made through it.
        assert click.get_current_context() is ctx
        ctx.obj = 'FOOBAR'
        assert click.get_current_context().obj == 'FOOBAR'

    # Outside of an invocation there is no current context.
    assert click.get_current_context(silent=True) is None
    runner.invoke(cmd, [], catch_exceptions=False)
    assert click.get_current_context(silent=True) is None
예제 #20
0
def filename_command():
    """
    Executor for `globus config filename`
    """
    try:
        config = get_config_obj(file_error=True)
    except IOError as err:
        # Could not load the config file; report and exit non-zero.
        safeprint(err, write_to_stderr=True)
        click.get_current_context().exit(1)
    else:
        safeprint(config.filename)
예제 #21
0
def list_missing_regions(cli_data):
    """
    List missing source sounds required to fully generate a target.
    """
    cli_data.populate_vsq()

    missing = cli_data.target.missing_vsq_regions()
    for region in missing:
        click.echo('{0}'.format(region))

    # Non-zero exit when anything is missing.
    if missing:
        click.get_current_context().exit(1)
예제 #22
0
 def save(self, file=None):
     """Serialize this config to ``file`` (defaults to its own save_file)."""
     if file is None:
         file = self.save_file
     # Pretty-print the JSON only when running with --debug state.
     ctx_obj = click.get_current_context().obj
     if isinstance(ctx_obj, proscli.utils.State) and ctx_obj.debug:
         proscli.utils.debug('Pretty Formatting {} File'.format(self.__class__.__name__))
         jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
     else:
         jsonpickle.set_encoder_options('json', sort_keys=True)
     parent_dir = os.path.dirname(file)
     if parent_dir:
         os.makedirs(parent_dir, exist_ok=True)
     with open(file, 'w') as f:
         f.write(jsonpickle.encode(self))
예제 #23
0
def first_run(ctx: proscli.utils.State, force=False, defaults=False):
    """Offer to set up the default depot (and kernel) when none are configured.

    :param ctx: CLI state carrying the pros configuration
    :param force: run the setup prompts even if depots already exist
    :param defaults: accept every prompt without asking
    """
    # Query the depot list once instead of twice back-to-back.
    have_depots = len(utils.get_depot_configs(ctx.pros_cfg)) > 0
    if not have_depots:
        click.echo('You don\'t currently have any depots configured.')
    if not have_depots or force:
        if defaults or click.confirm('Add the official PROS kernel depot, pros-mainline?', default=True):
            click.get_current_context().invoke(add_depot, name='pros-mainline',
                                               registrar='github-releases',
                                               location='purduesigbots/pros',
                                               configure=False)
            click.echo('Added pros-mainline to available depots. '
                       'Add more depots in the future by running `pros conduct add-depot`\n')
            if defaults or click.confirm('Download the latest kernel?', default=True):
                click.get_current_context().invoke(download, name='kernel', depot='pros-mainline')
예제 #24
0
def upload(port, binary, no_poll=False, ctx=None):
    """Flash ``binary`` onto a VEX Cortex connected at ``port``.

    :param port: name of the serial port to open
    :param binary: path to the binary file to upload
    :param no_poll: skip querying system info and assume a direct USB link
    :param ctx: CLI state; a fresh ``proscli.utils.State`` when omitted
    :return: False on failure; None otherwise
    """
    if ctx is None:
        # BUG FIX: the mutable default argument (ctx=proscli.utils.State())
        # was a single instance shared across all calls; create one per call.
        ctx = proscli.utils.State()
    if not os.path.isfile(binary):
        click.echo('Failed to download... file does not exist')
        return False
    port = prosflasher.ports.create_serial(port)
    if not port:
        click.echo('Failed to download: port not found')
        return
    try:
        stop_user_code(port, ctx)
        if not no_poll:
            sys_info = ask_sys_info(port, ctx)
            if sys_info is None:
                # Retry once after a pause.
                time.sleep(1.5)
                # BUG FIX: the retry previously dropped the ctx argument.
                sys_info = ask_sys_info(port, ctx)
                if sys_info is None:
                    click.echo('Failed to get system info... Try again', err=True)
                    click.get_current_context().abort()
                    sys.exit(1)
            click.echo(repr(sys_info))
        else:
            sys_info = SystemInfo()
            sys_info.connection_type = ConnectionType.serial_usb  # assume this
        if sys_info.connection_type == ConnectionType.unknown:
            click.confirm('Unable to determine system type. It may be necessary to press the '
                          'programming button on the programming kit. Continue?', abort=True, default=True)
        if sys_info.connection_type == ConnectionType.serial_vexnet2:
            # need to send to download channel
            if not send_to_download_channel(port):
                return False
        if not expose_bootloader(port):
            return False
        if sys_info.connection_type == ConnectionType.serial_usb:
            time.sleep(0.25)
        if not prosflasher.bootloader.prepare_bootloader(port):
            return False
        if not prosflasher.bootloader.erase_flash(port):
            return False
        if not prosflasher.bootloader.upload_binary(port, binary):
            return False
        if not prosflasher.bootloader.send_go_command(port, 0x08000000):
            return False

        reset_cortex(port)

    except serial.serialutil.SerialException as e:
        click.echo('Failed to download code! ' + str(e))
    finally:
        # Always release the serial port, even on failure.
        port.close()
    click.echo("Download complete!")
예제 #25
0
def validate_filenames(cli_data):
    """
    Validates the filenames of target sounds.
    """
    have_violations = False

    for target in cli_data.target.targets:
        violations = target.validate_filenames()
        if violations:
            have_violations = True
            click.echo(target)
            for violation in violations:
                click.echo('{0}{1}'.format(INDENT, violation))

    # Non-zero exit when any target had an invalid filename.
    if have_violations:
        click.get_current_context().exit(1)
예제 #26
0
파일: handlers.py 프로젝트: jakereps/q2cli
    def _parse_boolean(self, string):
        """Parse string representing a boolean into Python bool type.

        Supported values match `configparser.ConfigParser.getboolean`.

        """
        trues = ['1', 'yes', 'true', 'on']
        falses = ['0', 'no', 'false', 'off']

        string_lower = string.lower()
        if string_lower in trues:
            return True
        elif string_lower in falses:
            return False
        else:
            import itertools
            import click

            msg = (
                "Error: unrecognized value for --%s flag: %s\n"
                "Supported values (case-insensitive): %s" %
                (self.cli_name, string,
                 ', '.join(itertools.chain(trues, falses)))
            )
            click.secho(msg, err=True, fg='red', bold=True)
            ctx = click.get_current_context()
            ctx.exit(1)
예제 #27
0
def get_code(shell=None, prog_name=None, env_name=None, extra_env=None):
    """Returns the completion code to be evaluated by the shell

    Parameters
    ----------
    shell : Shell
        The shell type (Default value = None)
    prog_name : str
        The program name on the command line (Default value = None)
    env_name : str
        The environment variable used to control the completion (Default value = None)
    extra_env : dict
        Some extra environment variables to be added to the generated code (Default value = None)

    Returns
    -------
    str
        The code to be evaluated by the shell
    """
    from jinja2 import Environment, FileSystemLoader

    # Resolve the target shell, auto-detecting when unspecified.
    if shell in [None, 'auto']:
        shell = get_auto_shell()
    if not isinstance(shell, Shell):
        shell = Shell[shell]

    # Fall back to click-derived defaults for the remaining parameters.
    prog_name = prog_name or click.get_current_context().find_root().info_name
    env_name = env_name or '_%s_COMPLETE' % prog_name.upper().replace('-', '_')
    extra_env = extra_env or {}

    loader = FileSystemLoader(os.path.dirname(__file__))
    template = Environment(loader=loader).get_template('%s.j2' % shell.name)
    return template.render(prog_name=prog_name, complete_var=env_name, extra_env=extra_env)
예제 #28
0
파일: commands.py 프로젝트: jakereps/q2cli
    def __call__(self, **kwargs):
        """Called when user hits return, **kwargs are Dict[click_names, Obj]"""
        import itertools
        import os
        import qiime2.util

        # Gather inputs/parameters and output paths from the click kwargs;
        # each handler reports any required options it could not resolve.
        arguments, missing_in, verbose, quiet = self.handle_in_params(kwargs)
        outputs, missing_out = self.handle_out_params(kwargs)

        if missing_in or missing_out:
            # A new context is generated for a callback, which will result in
            # the ctx.command_path duplicating the action, so just use the
            # parent so we can print the help *within* a callback.
            ctx = click.get_current_context().parent
            click.echo(ctx.get_help()+"\n", err=True)
            for option in itertools.chain(missing_in, missing_out):
                click.secho("Error: Missing option: --%s" % option, err=True,
                            fg='red', bold=True)
            if missing_out:
                click.echo(_OUTPUT_OPTION_ERR_MSG, err=True)
            ctx.exit(1)

        action = self._get_action()
        # `qiime2.util.redirected_stdio` defaults to stdout/stderr when
        # supplied `None`.
        log = None

        if not verbose:
            # Capture the action's output in a temp log file so it can be
            # surfaced via exit_with_error only when something goes wrong.
            import tempfile
            log = tempfile.NamedTemporaryFile(prefix='qiime2-q2cli-err-',
                                              suffix='.log',
                                              delete=False, mode='w')

        cleanup_logfile = False
        try:
            with qiime2.util.redirected_stdio(stdout=log, stderr=log):
                results = action(**arguments)
        except Exception as e:
            header = ('Plugin error from %s:'
                      % q2cli.util.to_cli_name(self.plugin['name']))
            if verbose:
                # log is not a file
                log = 'stderr'
            # exit_with_error prints the message/log location and exits.
            q2cli.util.exit_with_error(e, header=header, traceback=log)
        else:
            # Success: the captured log is no longer needed.
            cleanup_logfile = True
        finally:
            # OS X will reap temporary files that haven't been touched in
            # 36 hours, double check that the log is still on the filesystem
            # before trying to delete. Otherwise this will fail and the
            # output won't be written.
            if log and cleanup_logfile and os.path.exists(log.name):
                log.close()
                os.remove(log.name)

        # Persist each result to its requested output path.
        for result, output in zip(results, outputs):
            path = result.save(output)
            if not quiet:
                click.secho('Saved %s to: %s' % (result.type, path),
                            fg='green')
예제 #29
0
파일: cli.py 프로젝트: phretor/python-sdk
def get_public_ruleset(save, outfile, ruleset_id):
    """Get a public ruleset by its RULESET_ID"""
    ctx = click.get_current_context()

    api = ctx.meta.get('api')
    wdir = ctx.meta.get('wdir')
    quiet = ctx.meta.get('quiet')

    logger.info('Attempting to fetch ruleset %s', ruleset_id)
    result = api.get_public_ruleset(ruleset_id=ruleset_id)

    if not quiet:
        click.echo(pygmentize_json(result))

    if save:
        opened_here = not outfile
        if opened_here:
            filepath = os.path.join(wdir, 'ruleset-{}.json'.format(ruleset_id))
            # BUG FIX: json.dump writes str, so the file must be opened in
            # text mode ('w'); 'wb' made the dump raise a TypeError.
            outfile = io.open(filepath, 'w')
        else:
            filepath = outfile.name
        logger.info('Saving ruleset metadata to {}'.format(filepath))

        try:
            json.dump(result, outfile)
        finally:
            # Close only the file we opened; a caller-supplied outfile stays
            # under the caller's control.
            if opened_here:
                outfile.close()

        logger.info('Ruleset metadata saved correctly')
예제 #30
0
파일: handlers.py 프로젝트: jakereps/q2cli
    def get_value(self, arguments, fallback=None):
        """Resolve the output-dir option into a path-building callable.

        When the option was supplied, the directory is created and a function
        mapping output names to paths inside it is returned; otherwise a
        callable that always raises ValueNotFoundException is returned.
        """
        import os
        import os.path
        import click

        try:
            path = self._locate_value(arguments, fallback=fallback)
        except ValueNotFoundException:
            # Always fail to find a value as this handler doesn't exist.
            def fail(*_):
                raise ValueNotFoundException()

            return fail

        # TODO: do we want a --force like flag?
        if os.path.exists(path):
            click.secho("Error: --%s directory already exists, won't "
                        "overwrite." % self.cli_name, err=True, fg='red',
                        bold=True)
            click.get_current_context().exit(1)

        os.makedirs(path)

        def fallback_(name, cli_name):
            return os.path.join(path, name)
        return fallback_
예제 #31
0
파일: cli.py 프로젝트: pnlng/crisscross
def process(ctx, yaml, no_yaml, templates, out, open_ren, open_text, args,
            render, no_render, variable, force, quiet, include_tag):
    """
    Preprocess text files, and render with pandoc or rmarkdown.

    Variables are collected from YAML files and/or -k/--variable key-value
    pairs; the output path schema and templates are rendered once per
    variable dict, and the results optionally rendered/opened.
    """
    def process_kv(variable):
        # Parse 'key:value' strings from -k/--variable into a dict.
        kv_dict = {}
        p = re.compile('^(?P<key>.+?):(?P<value>.+)$')
        for kv in variable:
            m = p.match(kv)
            if m:
                key = strip_quotes(m.group('key'))
                value = strip_quotes(m.group('value'))
                kv_dict[key] = value
            else:
                raise ValueError('Invalid key-value pair: {}.'.format(kv))
        return kv_dict

    def add_default_kv(d, identifier='compiled', key='id'):
        # Ensure every variable dict carries an 'id' (used in output paths).
        return merge_dicts({key: identifier}, d)

    def process_yaml(f):
        # Load one YAML file, tagging it with an 'id' from its filename.
        identifier = FilePath.get_filename(f)
        with open(f) as yamlfile:
            yaml_vars = poyo.parse_string(yamlfile.read())
            id_vars = add_default_kv(yaml_vars, identifier)
        return id_vars

    def generate(var_dict):
        # Produce one output document from a variable dict.
        ren_path = chevron.render(out_path, var_dict)
        ren_path_obj = FilePath(ren_path)
        text_path_obj = FilePath(ren_path_obj.dir_ + ren_path_obj.name +
                                 in_ext)
        try:
            gen_text(var_dict=var_dict,
                     ren_path_obj=ren_path_obj,
                     text_path_obj=text_path_obj)
            if not no_render:
                gen_ren(render,
                        ren_path_obj=ren_path_obj,
                        text_path_obj=text_path_obj)
            else:
                if text_path_obj.path != ren_path_obj.path:
                    shutil.copy(text_path_obj.path, ren_path_obj.path)
                echo_success(ren_path_obj.basename + ' generated.')
        # BUG FIX: ClickException expects a string message (e.args is a
        # tuple); the click.Abort() calls that followed each raise were
        # unreachable and have been removed.
        except (RuntimeError, ValueError) as e:
            raise click.ClickException(str(e))
        except (FileNotFoundError) as e:
            raise click.ClickException(e.strerror + ': ' + e.filename)
        if open_ren:
            open_file(ren_path_obj.path)
        if open_text:
            open_file(text_path_obj.path)
        return

    params = click.get_current_context().params
    params.update(args=process_args(params['args']))

    # if out is a dir
    if re.fullmatch('.*/', out):
        confirm(
            'You supplied a directory for -o --out. No file name or format is specified. PDF files with default names will be created. \n\nOutput schema: {0}\n\nIs this correct?'
            .format(out))
        out_path = out + '{{id}}.pdf'
    else:
        out_path = out

    in_ext = validate_exts(templates)
    out_ext = os.path.splitext(out_path)[-1]

    if out_ext == '':
        confirm(
            '\nThe output schema you supplied does not appear to have an extension. \n\nOutput schema: {0}\n\nIs this correct?'
            .format(out_path))
    if no_render:
        if in_ext != out_ext:
            confirm(
                '\nYou choose to not render the documents, but the input and output files specified seem to have different extensions. \n\n Input: {input}\n Output: {output}\n\nIs this correct? If not, please modify -o --out.'
                .format(input=' '.join(
                    [os.path.basename(t) for t in templates]),
                        output=os.path.basename(out_path)))

    kv_dict = process_kv(variable)

    all_yamls = []

    if not no_yaml:
        yaml_dicts = []
        for y in yaml:
            y_expanded = glob(y)
            all_yamls += y_expanded
            if not y_expanded:
                if not variable:
                    echo_warn('Nothing matching {0}.'.format(y))
            else:
                for f in y_expanded:
                    yaml_dicts.append(process_yaml(f))
        # assume that user forgot to add --no-yaml tag
        if not yaml_dicts:
            yaml_dicts = [add_default_kv({})]

        # add kv pairs
        gen_dicts = [merge_dicts(yd, kv_dict) for yd in yaml_dicts]

    else:
        if not kv_dict:
            # Raw string avoids the invalid '\{' escape warning.
            m = re.match(r'\{\{(?!id).+\}\}', out_path)
            # contains anything other than {{id}}
            if m:
                # (unreachable click.Abort() after this raise was removed)
                raise click.BadOptionUsage(
                    option_name='-k --variable',
                    message=
                    '--no-yaml specified, but no key-value pairs provided.')
        gen_dicts = [add_default_kv(kv_dict)]

    # multiple yamls but no variable in outpath
    if len(gen_dicts) > 1 and not re.match(r'.*\{\{.+\}\}', out_path):
        confirm(
            'You have supplied multiple YAML files, but the output schema does not contain variables. Files generated earlier will be overriden.\n\nYAML files: {all_yamls} \nOutput schema: {out_path} \n\nIs this correct?'
            .format(all_yamls=' '.join(all_yamls), out_path=out_path))
    for kv in gen_dicts:
        generate(kv)
예제 #32
0
def recognizer(model, pad, no_segmentation, bidi_reordering, script_ignore,
               base_image, input, output, lines) -> None:
    """Run text recognition on *base_image* using a segmentation (JSON)
    and write the predictions to *output* in the mode stored on the
    click context (`ctx.meta['mode']`)."""

    import json
    import tempfile

    from kraken import rpred

    try:
        im = Image.open(base_image)
    except IOError as e:
        raise click.BadParameter(str(e))

    ctx = click.get_current_context()

    # input may either be output from the segmenter then it is a JSON file or
    # be an image file when running the OCR subcommand alone. might still come
    # from some other subcommand though.
    scripts = set()
    # Path of a temporary segmentation file we create ourselves (if any),
    # tracked separately so it can be reliably removed afterwards.
    tmp_seg_path = None
    if not lines and base_image != input:
        lines = input
    if not lines:
        if no_segmentation:
            lines = tempfile.NamedTemporaryFile(mode='w', delete=False)
            logger.info(
                'Running in no_segmentation mode. Creating temporary segmentation {}.'
                .format(lines.name))
            json.dump(
                {
                    'script_detection': False,
                    'text_direction': 'horizontal-lr',
                    'boxes': [(0, 0) + im.size]
                }, lines)
            lines.close()
            tmp_seg_path = lines.name
            lines = tmp_seg_path
        else:
            raise click.UsageError(
                'No line segmentation given. Add one with `-l` or run `segment` first.'
            )
    elif no_segmentation:
        logger.warning(
            'no_segmentation mode enabled but segmentation defined. Ignoring --no-segmentation option.'
        )

    with open_file(lines, 'r') as fp:
        try:
            fp = cast(IO[Any], fp)
            bounds = json.load(fp)
        except ValueError as e:
            raise click.UsageError('{} invalid segmentation: {}'.format(
                lines, str(e)))
        # script detection
        if bounds['script_detection']:
            for l in bounds['boxes']:
                for t in l:
                    scripts.add(t[0])
            it = rpred.mm_rpred(model,
                                im,
                                bounds,
                                pad,
                                bidi_reordering=bidi_reordering,
                                script_ignore=script_ignore)
        else:
            it = rpred.rpred(model['default'],
                             im,
                             bounds,
                             pad,
                             bidi_reordering=bidi_reordering)

    # BUG FIX: the old guard `if not lines and no_segmentation` could never
    # fire (lines holds the temp file *name* by this point, a non-empty
    # string) and `lines.name` on a str would have raised -- the temporary
    # segmentation file therefore leaked. Use the tracked path instead.
    if tmp_seg_path:
        logger.debug('Removing temporary segmentation file.')
        os.unlink(tmp_seg_path)

    preds = []

    with log.progressbar(it, label='Processing',
                         length=len(bounds['boxes'])) as bar:
        for pred in bar:
            preds.append(pred)

    # (removed a redundant second `ctx = click.get_current_context()` here)
    with open_file(output, 'w', encoding='utf-8') as fp:
        fp = cast(IO[Any], fp)
        message('Writing recognition results for {}\t'.format(base_image),
                nl=False)
        logger.info('Serializing as {} into {}'.format(ctx.meta['mode'],
                                                       output))
        if ctx.meta['mode'] != 'text':
            from kraken import serialization
            fp.write(
                serialization.serialize(preds, base_image,
                                        Image.open(base_image).size,
                                        ctx.meta['text_direction'], scripts,
                                        ctx.meta['mode']))
        else:
            fp.write('\n'.join(s.prediction for s in preds))
        message('\u2713', fg='green')
예제 #33
0
def shell():
    """Run the shell command under the dockerized-error guard and exit
    the click context with its return code."""
    code = 0
    with friendly_dockerized_errors(click):
        code = ShellCommand().run()
    click.get_current_context().exit(code)
예제 #34
0
def get_user_context_obj():
    """Return user context, containing logging and configuration data.

    :return: User context object (dict)
    """
    ctx = click.get_current_context()
    return ctx.obj
예제 #35
0
def get_logger():
    """Return a logger named after the qualified name of the current
    click command."""
    current_ctx = click.get_current_context()
    return logging.getLogger(_get_command_qualname(current_ctx))
예제 #36
0
def clean():
    """Run the clean command under the dockerized-error guard and exit
    the click context with its return code."""
    code = 0
    with friendly_dockerized_errors(click):
        code = CleanCommand().run()
    click.get_current_context().exit(code)
예제 #37
0
파일: tasks.py 프로젝트: nwinner/emmet
def backup(clean, check):
    """Backup directory to HPSS.

    Archives every discovered block of launcher directories into HPSS via
    ``htar``; with --check, verifies each archive, and with --clean (only
    allowed together with --check) removes the verified files from disk.
    Returns a ReturnCodes member.
    """
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]  # dry-run unless --run was given
    ctx.parent.params[
        "nmax"] = sys.maxsize  # disable maximum launchers for backup
    logger.warning("--nmax ignored for HPSS backup!")
    directory = ctx.parent.params["directory"]
    # --clean without --check would delete files that were never verified.
    if not check and clean:
        logger.error("Not running --clean without --check enabled.")
        return ReturnCodes.ERROR

    check_pattern()

    logger.info("Discover launch directories ...")
    block_launchers = load_block_launchers()

    counter, nremove_total = 0, 0
    os.chdir(directory)
    for block, launchers in block_launchers.items():
        logger.info(f"{block} with {len(launchers)} launcher(s)")
        try:
            # Probe HPSS for an existing archive; raises HpssOSError if absent.
            isfile(f"{GARDEN}/{block}.tar")
        except HpssOSError:  # block not in HPSS
            if run:
                filelist = [os.path.join(block, l) for l in launchers]
                args = shlex.split(
                    f"htar -M 5000000 -Phcvf {GARDEN}/{block}.tar")
                try:
                    for line in run_command(args, filelist):
                        logger.info(line.strip())
                except subprocess.CalledProcessError as e:
                    logger.error(str(e))
                    return ReturnCodes.ERROR
                counter += 1
        else:
            logger.warning(f"Skip {block} - already in HPSS")

        # Check backup here to allow running it separately
        if check:
            logger.info(f"Verify {block}.tar ...")
            args = shlex.split(
                f"htar -Kv -Hrelpaths -Hverify=all -f {GARDEN}/{block}.tar")
            files_remove = []
            try:
                for line in run_command(args, []):
                    line = line.strip()
                    if line.startswith("HTAR: V "):
                        # NOTE(review): presumably a verified-file line splits
                        # into exactly 3 comma-separated fields with the path
                        # as the last token of the first -- confirm against
                        # actual htar output.
                        ls = line.split(", ")
                        if len(ls) == 3:
                            nfiles = len(files_remove)
                            # periodic progress log every 1000 files
                            if nfiles and not nfiles % 1000:
                                logger.info(f"{nfiles} files ...")
                            files_remove.append(ls[0].split()[-1])
                    else:
                        logger.info(line)
            except subprocess.CalledProcessError as e:
                logger.error(str(e))
                return ReturnCodes.ERROR

            if clean:
                nremove = len(files_remove)
                nremove_total += nremove
                if run:
                    with click.progressbar(files_remove,
                                           label="Removing files") as bar:
                        for fn in bar:
                            os.remove(fn)
                    logger.info(
                        f"Removed {nremove} files from disk for {block}.")
                else:
                    logger.info(
                        f"Would remove {nremove} files from disk for {block}.")

    logger.info(
        f"{counter}/{len(block_launchers)} blocks newly backed up to HPSS.")
    if clean:
        if run:
            logger.info(
                f"Verified and removed a total of {nremove_total} files.")
        else:
            logger.info(
                f"Would verify and remove a total of {nremove_total} files.")
    return ReturnCodes.SUCCESS
예제 #38
0
def exec(command):
    """Join the command words, run them via ExecCommand under the
    dockerized-error guard, and exit with the resulting code."""
    code = 0
    with friendly_dockerized_errors(click):
        code = ExecCommand(' '.join(command)).run()
    click.get_current_context().exit(code)
예제 #39
0
def cli(input, verbose):
    """Root command: set logger verbosity (each -v lowers the level by 10
    from WARNING) and stash the verbosity on the context meta."""
    level = 30 - 10 * verbose
    log.set_logger(logger, level=level)
    click.get_current_context().meta['verbose'] = verbose
예제 #40
0
def compose(command):
    """Run the compose command under the dockerized-error guard and exit
    the click context with its return code."""
    code = 0
    with friendly_dockerized_errors(click):
        code = ComposeCommand(command).run()
    click.get_current_context().exit(code)
예제 #41
0
def main(
    template,
    extra_context,
    no_input,
    checkout,
    verbose,
    replay,
    overwrite_if_exists,
    output_dir,
    config_file,
    default_config,
    debug_file,
    directory,
    skip_if_file_exists,
    accept_hooks,
    replay_file,
    list_installed,
):
    """Create a project from a Cookiecutter project template (TEMPLATE).

    Cookiecutter is free and open source software, developed and managed by
    volunteers. If you would like to help out or fund the project, please get
    in touch at https://github.com/cookiecutter/cookiecutter.
    """
    # Commands that should work without arguments
    if list_installed:
        list_installed_templates(default_config, config_file)
        sys.exit(0)

    # Raising usage, after all commands that should work without args.
    if not template or template.lower() == 'help':
        click.echo(click.get_current_context().get_help())
        sys.exit(0)

    configure_logger(stream_level='DEBUG' if verbose else 'INFO',
                     debug_file=debug_file)

    # If needed, prompt the user to ask whether or not they want to execute
    # the pre/post hooks.
    if accept_hooks == "ask":
        _accept_hooks = click.confirm("Do you want to execute hooks?")
    else:
        _accept_hooks = accept_hooks == "yes"

    # A replay file, when given, takes precedence over the --replay flag.
    if replay_file:
        replay = replay_file

    try:
        cookiecutter(
            template,
            checkout,
            no_input,
            extra_context=extra_context,
            replay=replay,
            overwrite_if_exists=overwrite_if_exists,
            output_dir=output_dir,
            config_file=config_file,
            default_config=default_config,
            # Repo password comes from the environment so it never appears
            # on the command line.
            password=os.environ.get('COOKIECUTTER_REPO_PASSWORD'),
            directory=directory,
            skip_if_file_exists=skip_if_file_exists,
            accept_hooks=_accept_hooks,
        )
    # Known failure modes: print the message and exit non-zero instead of
    # showing a traceback.
    except (
            ContextDecodingException,
            OutputDirExistsException,
            InvalidModeException,
            FailedHookException,
            UnknownExtension,
            InvalidZipRepository,
            RepositoryNotFound,
            RepositoryCloneFailed,
    ) as e:
        click.echo(e)
        sys.exit(1)
    except UndefinedVariableInTemplate as undefined_err:
        # Template rendering failed: show the undefined variable, the
        # underlying Jinja error, and the full context used for rendering.
        click.echo('{}'.format(undefined_err.message))
        click.echo('Error message: {}'.format(undefined_err.error.message))

        context_str = json.dumps(undefined_err.context,
                                 indent=4,
                                 sort_keys=True)
        click.echo('Context: {}'.format(context_str))
        sys.exit(1)
예제 #42
0
def main(nbpatterns, nbprescycles, homogenous, prestime, prestimetest, interpresdelay, patternsize, nbiter,
         probadegrade, lr, print_every, rngseed):
    """Entry point: pin computation to GPU 3 and launch training with all
    CLI parameters of the current click context."""
    torch.cuda.set_device(3)
    params = dict(click.get_current_context().params)
    train(paramdict=params)
예제 #43
0
def get_datadir():
    """Fetch the current data directory."""
    return click.get_current_context().meta['renku.datasets.datadir']
예제 #44
0
 def new_func(*args, **kwargs):
     # Inject the config file stored on the click context object as the
     # first argument of the wrapped function `f` (free variable from the
     # enclosing decorator).
     config_ = click.get_current_context().obj['config_file']
     return f(config_, *args, **kwargs)
예제 #45
0
def run(inputdir):
    """Convert raw card PNGs from *inputdir* into per-variant thumbnails
    under ../resources/images/cards/<variant>/<type>.png.

    Card backs and jokers are skipped; single-letter type codes (a/j/k/q)
    are expanded to full names.
    """
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%d/%m/%Y %H:%M:%S',
                        stream=sys.stdout)

    logging.getLogger().setLevel(logging.INFO)

    context = click.get_current_context()

    # Without an input directory, just show the command help.
    if not inputdir:
        click.echo(run.get_help(context))
        context.exit()

    if not os.path.isdir(inputdir):
        # FIX: grammar of the error message ("exists" -> "exist").
        raise FileNotFoundError(inputdir + ' does not exist')

    outputdir = os.path.realpath('../resources/images/cards')

    logging.info('Checking output sub-dirs')

    for card_variant in CARDS_VARIANTS:
        card_variant_dir = os.path.join(outputdir, card_variant)

        if not os.path.isdir(card_variant_dir):
            logging.info('Creating output sub-dir ' + card_variant_dir)

            os.mkdir(card_variant_dir)

    card_paths = glob(os.path.join(inputdir, '*.png'))

    logging.info('Processing cards')

    for card_path in card_paths:
        card_name = os.path.splitext(os.path.basename(card_path))[0]

        if card_name.startswith('cardBack') or card_name == 'cardJoker':
            logging.info('Ignoring ' + card_name)

            continue

        logging.info('Processing ' + card_name)

        # e.g. 'cardHeartsA' -> 'hearts_a'
        card_name = snake_case(card_name.replace('card', ''))

        card_variant, card_type = card_name.split('_')

        if card_variant not in CARDS_VARIANTS:
            logging.info(card_name + ': invalid variant "' + card_variant +
                         '"')

            continue

        # Expand single-letter face codes to full names.
        if card_type == 'a':
            card_type = 'ace'
        elif card_type == 'j':
            card_type = 'jack'
        elif card_type == 'k':
            card_type = 'king'
        elif card_type == 'q':
            card_type = 'queen'

        card_image = Image.open(card_path)
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
        # the identical filter and has been available since Pillow 2.7.
        card_image.thumbnail((CARDS_WIDTH, 9999), Image.LANCZOS)
        card_image.save(os.path.join(outputdir, card_variant,
                                     card_type + '.png'),
                        optimize=True)
예제 #46
0
파일: idf.py 프로젝트: trapplab/esp-idf
        def execute_tasks(self, tasks, **kwargs):
            """Resolve option scoping and inter-task dependencies for
            *tasks*, then run them in dependency order.

            kwargs holds the global CLI arguments; returns the ordered
            mapping of task name -> task that was (or would be) executed.
            """
            ctx = click.get_current_context()
            global_args = PropertyDict(kwargs)

            # Show warning if some tasks are present several times in the list
            dupplicated_tasks = sorted([
                item for item, count in Counter(
                    task.name for task in tasks).items() if count > 1
            ])
            if dupplicated_tasks:
                dupes = ", ".join('"%s"' % t for t in dupplicated_tasks)
                print(
                    "WARNING: Command%s found in the list of commands more than once. "
                    % ("s %s are" % dupes
                       if len(dupplicated_tasks) > 1 else " %s is" % dupes) +
                    "Only first occurence will be executed.")

            # Set propagated global options.
            # These options may be set on one subcommand, but available in the list of global arguments
            for task in tasks:
                for key in list(task.action_args):
                    option = next(
                        (o for o in ctx.command.params if o.name == key), None)

                    if option and (option.scope.is_global
                                   or option.scope.is_shared):
                        local_value = task.action_args.pop(key)
                        global_value = global_args[key]
                        default = () if option.multiple else option.default

                        # A global option may be set at most once across the
                        # whole command line; conflicting values are fatal.
                        if global_value != default and local_value != default and global_value != local_value:
                            raise FatalError(
                                'Option "%s" provided for "%s" is already defined to a different value. '
                                "This option can appear at most once in the command line."
                                % (key, task.name))
                        if local_value != default:
                            global_args[key] = local_value

            # Show warnings about global arguments
            print_deprecation_warning(ctx)

            # Make sure that define_cache_entry is mutable list and can be modified in callbacks
            global_args.define_cache_entry = list(
                global_args.define_cache_entry)

            # Execute all global action callback - first from idf.py itself, then from extensions
            for action_callback in ctx.command.global_action_callbacks:
                action_callback(ctx, global_args, tasks)

            # Always show help when command is not provided
            if not tasks:
                print(ctx.get_help())
                ctx.exit()

            # Build full list of tasks to and deal with dependencies and order dependencies
            tasks_to_run = OrderedDict()
            while tasks:
                task = tasks[0]
                tasks_dict = dict([(t.name, t) for t in tasks])

                dependecies_processed = True

                # If task have some dependecies they have to be executed before the task.
                for dep in task.dependencies:
                    if dep not in tasks_to_run.keys():
                        # If dependent task is in the list of unprocessed tasks move to the front of the list
                        if dep in tasks_dict.keys():
                            dep_task = tasks.pop(tasks.index(tasks_dict[dep]))
                        # Otherwise invoke it with default set of options
                        # and put to the front of the list of unprocessed tasks
                        else:
                            print(
                                'Adding "%s"\'s dependency "%s" to list of commands with default set of options.'
                                % (task.name, dep))
                            dep_task = ctx.invoke(
                                ctx.command.get_command(ctx, dep))

                            # Remove options with global scope from invoke tasks because they are alread in global_args
                            for key in list(dep_task.action_args):
                                option = next((o for o in ctx.command.params
                                               if o.name == key), None)
                                if option and (option.scope.is_global
                                               or option.scope.is_shared):
                                    dep_task.action_args.pop(key)

                        tasks.insert(0, dep_task)
                        dependecies_processed = False

                # Order only dependencies are moved to the front of the queue if they present in command list
                for dep in task.order_dependencies:
                    if dep in tasks_dict.keys(
                    ) and dep not in tasks_to_run.keys():
                        tasks.insert(0,
                                     tasks.pop(tasks.index(tasks_dict[dep])))
                        dependecies_processed = False

                if dependecies_processed:
                    # Remove task from list of unprocessed tasks
                    tasks.pop(0)

                    # And add to the queue
                    if task.name not in tasks_to_run.keys():
                        tasks_to_run.update([(task.name, task)])

            # Run all tasks in the queue
            # when global_args.dry_run is true idf.py works in idle mode and skips actual task execution
            if not global_args.dry_run:
                for task in tasks_to_run.values():
                    name_with_aliases = task.name
                    if task.aliases:
                        name_with_aliases += " (aliases: %s)" % ", ".join(
                            task.aliases)

                    print("Executing action: %s" % name_with_aliases)
                    task.run(ctx, global_args, task.action_args)

                self._print_closing_message(global_args, tasks_to_run.keys())

            return tasks_to_run
예제 #47
0
 def inner(*args, **kwargs):
     # Skip the wrapped callback `f` (free variable from the enclosing
     # decorator) entirely when any of the context's help flags appears
     # in argv -- help output should not trigger the command body.
     ctx = click.get_current_context()
     for arg in ctx.help_option_names:
         if arg in sys.argv:
             return
     f(*args, **kwargs)
예제 #48
0
def main(nbclasses, alpha, rule, gamma, steplr, activ, flare, nbshots, nbf,
         prestime, prestimetest, ipd, nbiter, lr, test_every, save_every,
         rngseed):
    """Forward every CLI parameter of the current click context to train()."""
    ctx = click.get_current_context()
    train(paramdict=dict(ctx.params))
예제 #49
0
파일: tasks.py 프로젝트: nwinner/emmet
def parse(task_ids, nproc, store_volumetric_data):
    """Parse VASP launchers into tasks.

    Discovers VASP directories under the parent command's directory and
    parses them into the target task collection in parallel (nproc
    workers), reserving task IDs up-front to avoid collisions.
    """
    ctx = click.get_current_context()
    if "CLIENT" not in ctx.obj:
        raise EmmetCliError("Use --spec to set target DB for tasks!")

    run = ctx.parent.parent.params["run"]  # dry-run unless --run was given
    nmax = ctx.parent.params["nmax"]
    directory = ctx.parent.params["directory"].rstrip(os.sep)
    tag = os.path.basename(directory)
    target = ctx.obj["CLIENT"]
    logger.info(
        f"Connected to {target.collection.full_name} with {target.collection.count()} tasks."
    )
    ensure_indexes(["task_id", "tags", "dir_name", "retired_task_id"],
                   [target.collection])

    # Split the at-most-nmax directories into one chunk per worker.
    chunk_size = math.ceil(nmax / nproc)
    if nproc > 1 and nmax <= chunk_size:
        nproc = 1
        logger.warning(
            f"nmax = {nmax} but chunk size = {chunk_size} -> sequential parsing."
        )

    pool = multiprocessing.Pool(processes=nproc)
    gen = VaspDirsGenerator()
    iterator = iterator_slice(gen, chunk_size)  # process in chunks
    queue = deque()
    count = 0

    sep_tid = None
    if task_ids:
        # Explicit task-ID file supplied by the user.
        with open(task_ids, "r") as f:
            task_ids = json.load(f)
    else:
        # reserve list of task_ids to avoid collisions during multiprocessing
        # insert empty doc with max ID + 1 into target collection for parallel SLURM jobs
        # NOTE use regex first to reduce size of distinct below 16MB
        all_task_ids = target.collection.distinct(
            "task_id", {"task_id": {
                "$regex": r"^mp-\d{7,}$"
            }})
        if not all_task_ids:
            all_task_ids = target.collection.distinct("task_id")

        next_tid = max(int(tid.split("-")[-1]) for tid in all_task_ids) + 1
        lst = [f"mp-{next_tid + n}" for n in range(nmax)]
        if run:
            # Separator doc marks the end of the reserved ID range so other
            # jobs start above it; removed again after parsing.
            sep_tid = f"mp-{next_tid + nmax}"
            target.collection.insert({"task_id": sep_tid})
            logger.info(f"Inserted separator task with task_id {sep_tid}.")
        task_ids = chunks(lst, chunk_size)
        logger.info(f"Reserved {len(lst)} task ID(s).")

    # Feed chunks to the pool while draining finished results; keeps at most
    # one pending result per worker in flight.
    while iterator or queue:
        try:
            args = [next(iterator), tag, task_ids]
            queue.append(pool.apply_async(parse_vasp_dirs, args))
        except (StopIteration, TypeError):
            iterator = None

        while queue and (len(queue) >= pool._processes or not iterator):
            process = queue.pop()
            process.wait(1)
            if not process.ready():
                # Not done yet: requeue and keep polling.
                queue.append(process)
            else:
                count += process.get()

    pool.close()
    if run:
        logger.info(
            f"Successfully parsed and inserted {count}/{gen.value} tasks in {directory}."
        )
        if sep_tid:
            target.collection.remove({"task_id": sep_tid})
            logger.info(f"Removed separator task {sep_tid}.")
    else:
        logger.info(
            f"Would parse and insert {count}/{gen.value} tasks in {directory}."
        )
    return ReturnCodes.SUCCESS if count and gen.value else ReturnCodes.WARNING
예제 #50
0
def push():
    """Run the push command under the dockerized-error guard and exit
    the click context with its return code."""
    code = 0
    with friendly_dockerized_errors(click):
        code = PushCommand().run()
    click.get_current_context().exit(code)
예제 #51
0
파일: cli.py 프로젝트: z-hermit/rq
 def wrapper(*args, **kwargs):
     # Build a CliConfig from the CLI kwargs and invoke the wrapped
     # command `func` (free variable from the enclosing decorator) with
     # it as the first argument, dropping the original first positional.
     ctx = click.get_current_context()
     cli_config = CliConfig(**kwargs)
     return ctx.invoke(func, cli_config, *args[1:], **kwargs)
예제 #52
0
def main():
    """When invoked without a subcommand, print the group help and exit 0."""
    context = click.get_current_context()
    if context.invoked_subcommand is not None:
        return
    click.echo(context.get_help())
    sys.exit(0)
예제 #53
0
def build_environment(
    generate_packages: bool,
    build_environment: bool,
    environment_name: str,
    environment_file: str,
    package_version_file: str,
    start_from_wave: int,
    stop_at_wave: int,
) -> None:
    """Regenerate conda-forge style packages wave by wave and/or build a
    conda environment from an environment file.

    With neither flag set, prints the command help and exits.  Package
    regeneration reads versions from *package_version_file* and processes
    waves [start_from_wave, stop_at_wave].
    """

    if not generate_packages and not build_environment:

        ctx = click.get_current_context()
        click.echo(ctx.get_help())
        ctx.exit()

    if generate_packages:

        rich.print("[bold]Regenerating packages[/bold]")
        rich.print("[bold]-----[/bold]")
        if not package_version_file:
            raise RuntimeError(
                "Please specify the version file to use with the "
                "--package_version_file option")

        with open(pathlib.Path(package_version_file)) as open_file:
            version_dict: Dict[str, str] = yaml.load(open_file,
                                                     Loader=yaml.SafeLoader)

        wave_index: int
        wave: Dict[str, str]
        for wave_index, wave in enumerate(repository_data):
            if wave_index < start_from_wave:
                continue
            if wave_index > stop_at_wave:
                continue
            rich.print(f"[bold]Building wave {wave_index}...[/bold]")
            rich.print(
                f"[bold]Packages in the wave: {tuple(wave.keys())}[/bold]")
            start_time: datetime.datetime = datetime.datetime.now()
            packages_to_build: Dict[str, feedstock_utils.Repository] = {}
            package_build_status: Dict[str, BuildStatus] = {}
            package: str
            # Validate the whole wave before touching any repository.
            for package in wave:
                if package not in version_dict:
                    raise RuntimeError(
                        f"Package {package} not found in version file")
            for package in wave:
                repository: feedstock_utils.Repository = feedstock_utils.Repository(
                    repository=wave[package], debug=False)
                repository.set_version_and_build_number(package, version_dict)
                repository.run_conda_smithy()
                repository.git_push()
                packages_to_build[package] = repository
                package_build_status[package] = BuildStatus(
                    status="Not Started Yet", last_run_id=0)
            wave_successful = build_wave(packages_to_build,
                                         package_build_status, start_time)
            if not wave_successful:
                rich.print(
                    f"[bold]Wave {wave_index} did not build correctly: stopping[/bold]"
                )
                # BUG FIX: the message promised to stop, but the loop kept
                # building later waves against a broken base -- stop here.
                break

    if build_environment:

        rich.print("[bold]\nBuilding environment[/bold]")
        if not environment_name:
            raise RuntimeError(
                "Please specify the environment name with the --environment-name "
                "option")
        if not environment_file:
            raise RuntimeError(
                "Please specify the file describing the environment with the "
                "--environment-file option")

        rich.print(
            f"[bold]Building environment {environment_name} based on file "
            f"{environment_file}[/bold]")

        rich.print(
            f"conda env create -n {environment_name} --file {environment_file}"
        )
        return_code = subprocess.call(
            f"conda env create -n {environment_name} --file {environment_file}",
            shell=True,
        )
        if return_code != 0:
            raise RuntimeError(f"Error building {environment_name} repository")
예제 #54
0
def set_global_options(json_, force=False, wait=False):
    """Record the global CLI flags on the shared click context object."""
    shared_obj = click.get_current_context().obj
    shared_obj[CTX_OUTPUT_JSON] = json_
    shared_obj[CTX_FORCE_COMPATIBILITY] = force
    shared_obj[CTX_BLOCKING_MODE] = wait
예제 #55
0
def exit_with_msg():
    """Abort the running command with a usage-error message."""
    click.get_current_context().fail("target and email is required!")
예제 #56
0
def custom_except_hook(exc_info):
    """
    A custom excepthook to present python errors produced by the CLI.
    We don't want to show end users big scary stacktraces if they aren't python
    programmers, so slim it down to some basic info. We keep a "DEBUGMODE" env
    variable kicking around to let us turn on stacktraces if we ever need them.

    Additionally, does global suppression of EPIPE errors, which often occur
    when a python command is piped to a consumer like `head` which closes its
    input stream before python has sent all of its output.
    DANGER: There is a (small) risk that this will bite us if there are EPIPE
    errors produced within the Globus SDK. We should keep an eye on this
    possibility, as it may demand more sophisticated handling of EPIPE.
    Possible TODO item to reduce this risk: inspect the exception and only hide
    EPIPE if it comes from within the globus_cli package.

    :param exc_info: an ``(exception_type, exception, traceback)`` triple as
        produced by ``sys.exc_info()``
    """
    exception_type, exception, traceback = exc_info

    # check if we're in debug mode, and print the full traceback if we are
    ctx = click.get_current_context()
    state = ctx.ensure_object(CommandState)
    if state.debug:
        # use sys.__excepthook__ (the interpreter's original hook), NOT
        # sys.excepthook: if this function is installed as sys.excepthook,
        # calling sys.excepthook here would re-enter this function and
        # recurse without bound
        sys.__excepthook__(exception_type, exception, traceback)

    # we're not in debug mode, do custom handling
    else:
        # if it's a click exception, re-raise as original -- Click's main
        # execution context will handle pretty-printing
        if isinstance(exception, click.ClickException):
            reraise(exception_type, exception, traceback)

        # handle the Globus-raised errors with our special hooks
        # these will present the output (on stderr) as JSON
        elif isinstance(exception, exc.TransferAPIError):
            if exception.code == "ClientError.AuthenticationFailed":
                authentication_hook(exception)
            else:
                transferapi_hook(exception)

        elif isinstance(exception, exc.AuthAPIError):
            if exception.code == "UNAUTHORIZED":
                authentication_hook(exception)
            # invalid_grant occurs when the users refresh tokens are not valid
            elif exception.message == "invalid_grant":
                invalidrefresh_hook(exception)
            else:
                authapi_hook(exception)

        elif isinstance(exception, exc.GlobusAPIError):
            globusapi_hook(exception)

        # specific checks fell through -- now check if it's any kind of
        # GlobusError
        elif isinstance(exception, exc.GlobusError):
            globus_generic_hook(exception)

        # not a GlobusError, not a ClickException -- something like ValueError
        # or NotImplementedError bubbled all the way up here: just print it
        # out, basically
        else:
            safeprint(u'{}: {}'.format(exception_type.__name__, exception))
            sys.exit(1)
예제 #57
0
def cli(broker_url, port):  # pylint: disable=unused-argument
    """Entry point: hand the parsed click parameters to the exporter."""
    current_ctx = click.get_current_context()
    exporter = Exporter()
    exporter.run(current_ctx.params)
예제 #58
0
def repl():
    """Start an interactive session"""
    # persist command history across sessions in the user's home directory
    history_path = os.path.expanduser('~/.repl_history')
    click_repl.repl(
        click.get_current_context(),
        prompt_kwargs={'history': FileHistory(history_path)},
    )
예제 #59
0
def friendly_dockerized_errors(click):
    """Guard generator: report a CommandError on stderr and exit(1).

    Yields once; if a CommandError is thrown back in, its message is echoed
    via the supplied ``click`` object and the current context exits with 1.
    """
    try:
        yield
    except CommandError as command_error:
        click.echo(command_error.message, err=True)
        click.get_current_context().exit(1)
예제 #60
0
파일: tasks.py 프로젝트: nwinner/emmet
def restore(inputfile, file_filter):
    """Restore launchers from HPSS.

    Reads launcher paths from ``inputfile``, groups them by block, lists the
    matching ``htar`` archives under ``GARDEN``, and extracts any files that
    are not already present in the target directory. Options are inherited
    from parent click commands: ``run`` (actually extract vs. dry-run),
    ``nmax`` (max number of launchers), ``pattern`` (line filter) and
    ``directory`` (restore target).

    Returns ``ReturnCodes.SUCCESS``, or ``ReturnCodes.ERROR`` if an htar
    invocation fails.
    """
    # pull shared options from the parent command contexts
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    nmax = ctx.parent.params["nmax"]
    pattern = ctx.parent.params["pattern"]
    directory = ctx.parent.params["directory"]
    if not os.path.exists(directory):
        os.makedirs(directory)

    check_pattern(nested_allowed=True)
    # NOTE(review): assumes a "matgen" group exists on this host -- confirm
    shutil.chown(directory, group="matgen")
    block_launchers = defaultdict(list)
    nlaunchers = 0
    with open(inputfile, "r") as infile:
        # extraction below is relative, so work inside the target directory
        os.chdir(directory)
        with click.progressbar(infile, label="Load blocks") as bar:
            for line in bar:
                if fnmatch(line, pattern):
                    if nlaunchers == nmax:
                        break
                    # each matching line has the form "<block>/<launcher...>"
                    block, launcher = line.split(os.sep, 1)
                    # one archive entry per requested file filter
                    for ff in file_filter:
                        block_launchers[block].append(
                            os.path.join(launcher.strip(), ff))
                    nlaunchers += 1

    nblocks = len(block_launchers)
    nfiles = sum(len(v) for v in block_launchers.values())
    logger.info(f"Restore {nblocks} block(s) with {nlaunchers} launchers"
                f" and {nfiles} file filters to {directory} ...")

    # htar argument lists are chunked to stay under a max argument count
    nfiles_restore_total, max_args = 0, 15000
    for block, files in block_launchers.items():
        # get full list of matching files in archive and check against existing files
        args = shlex.split(f"htar -tf {GARDEN}/{block}.tar")
        filelist = [os.path.join(block, f) for f in files]
        filelist_chunks = [
            filelist[i:i + max_args] for i in range(0, len(filelist), max_args)
        ]
        filelist_restore, cnt = [], 0
        try:
            for chunk in filelist_chunks:
                # `htar -tf` lists archive members; keep only those missing on disk
                for line in run_command(args, chunk):
                    fn = extract_filename(line)
                    if fn:
                        cnt += 1
                        if os.path.exists(fn):
                            logger.debug(
                                f"Skip {fn} - already exists on disk.")
                        else:
                            filelist_restore.append(fn)
        except subprocess.CalledProcessError as e:
            logger.error(str(e))
            return ReturnCodes.ERROR

        # restore what's missing
        if filelist_restore:
            nfiles_restore = len(filelist_restore)
            nfiles_restore_total += nfiles_restore
            if run:
                logger.info(
                    f"Restore {nfiles_restore}/{cnt} files for {block} to {directory} ..."
                )
                # extract the missing files, again in argument-count chunks
                args = shlex.split(f"htar -xvf {GARDEN}/{block}.tar")
                filelist_restore_chunks = [
                    filelist_restore[i:i + max_args]
                    for i in range(0, len(filelist_restore), max_args)
                ]
                try:
                    for chunk in filelist_restore_chunks:
                        for line in run_command(args, chunk):
                            logger.info(line.strip())
                except subprocess.CalledProcessError as e:
                    logger.error(str(e))
                    return ReturnCodes.ERROR
            else:
                # dry-run: only report what would be extracted
                logger.info(
                    f"Would restore {nfiles_restore}/{cnt} files for {block} to {directory}."
                )
        else:
            logger.warning(f"Nothing to restore for {block}!")

        if run:
            logger.info(f"Set group of {block} to matgen recursively ...")
            recursive_chown(block, "matgen")

    if run:
        logger.info(f"Restored {nfiles_restore_total} files to {directory}.")
    else:
        logger.info(
            f"Would restore {nfiles_restore_total} files to {directory}.")
    return ReturnCodes.SUCCESS