Example #1
def parse_args(output_formats):
    parser = ArgumentParser(description='crate shell')
    parser.add_argument('-v', '--verbose', action='count',
                        dest='verbose', default=0,
                        help='use -v to get debug output')
    parser.add_argument('--history',
                        type=str,
                        help='the history file to use', default=HISTORY_PATH)

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--command', type=str,
                        help='execute sql statement')
    group.add_argument('--sysinfo', action='store_true', default=False,
                        help='show system information')

    parser.add_argument('--hosts', type=str, nargs='*',
                        help='the crate hosts to connect to', metavar='HOST')
    parser.add_argument('--format', type=str, default='tabular', choices=output_formats,
                        help='output format of the sql response', metavar='FORMAT')
    parser.add_argument('--version', action='store_true', default=False,
                        help='show crash version and exit')
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        pass
    args = parser.parse_args()
    return args
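
The pattern above treats argcomplete as a soft dependency: autocomplete(parser) is called only when the import succeeds, and always before parse_args(). For completion to actually fire, the executable script also needs the PYTHON_ARGCOMPLETE_OK marker near its top and a one-time shell registration. A minimal sketch of the full wiring (script and command names here are illustrative, not from the project above):

#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
from argparse import ArgumentParser


def parse_args(argv=None):
    parser = ArgumentParser(description='example CLI')
    parser.add_argument('--format', choices=('tabular', 'json', 'csv'),
                        help='output format')
    try:
        import argcomplete
        argcomplete.autocomplete(parser)  # must run before parse_args()
    except ImportError:
        pass  # completion simply stays disabled
    return parser.parse_args(argv)


if __name__ == '__main__':
    print(parse_args())

# Shell side (bash), once per session or in ~/.bashrc:
#   eval "$(register-python-argcomplete example-cli)"
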
Example #2
File: main.py Project: WurstWorks/datalad
def main(cmdlineargs=None):
    # PYTHON_ARGCOMPLETE_OK
    parser = setup_parser()
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        pass
    # parse cmd args
    cmdlineargs = parser.parse_args(cmdlineargs)
    if cmdlineargs.change_path is not None:
        for path in cmdlineargs.change_path:
            chpwd(path)
    # run the function associated with the selected command
    if cmdlineargs.common_debug:
        # So we could see/stop clearly at the point of failure
        setup_exceptionhook()
        cmdlineargs.func(cmdlineargs)
    else:
        # Otherwise - guard and only log the summary. Postmortem is not
        # as convenient if being caught in this ultimate except
        try:
            cmdlineargs.func(cmdlineargs)
        except Exception as exc:
            lgr.error('%s (%s)' % (exc_str(exc), exc.__class__.__name__))
            sys.exit(1)
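
argcomplete's global completion looks for the string PYTHON_ARGCOMPLETE_OK within roughly the first kilobyte of the script that is actually executed, so the marker usually belongs in the console-script wrapper rather than deep inside a library module. A hedged sketch of such a wrapper for a main() like the one above (the import path is an assumption for illustration only):

#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
import sys

from datalad.cmdline.main import main  # assumed import path, for illustration

if __name__ == '__main__':
    sys.exit(main())
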
Example #3
File: main.py Project: FlanFlanagan/xsgen
def main():
    base = Plugins(["xsgen.base"])
    preparser = base.build_cli()
    prens = preparser.parse_known_args()[0]
    base.merge_rcs()
    predefaultrc = base.rc
    prerc = RunControl()
    prerc._update(predefaultrc)
    prerc.rc = prens.rc
    rcdict = {}
    if os.path.isfile(prerc.rc):
        exec_file(prerc.rc, rcdict, rcdict)
        prerc.rc = rcdict['rc'] if 'rc' in rcdict else NotSpecified
        prerc.plugins = rcdict['plugins'] if 'plugins' in rcdict else NotSpecified
    prerc._update([(k, v) for k, v in prens.__dict__.items()])

    plugins = Plugins(prerc.plugins)
    parser = plugins.build_cli()
    if argcomplete is not None and prerc.bash_completion:
        argcomplete.autocomplete(parser)
    ns = parser.parse_args()
    rc = plugins.merge_rcs()
    rc._update(rcdict)
    rc._update([(k, v) for k, v in ns.__dict__.items()])
    plugins.setup()
    plugins.execute()
    plugins.teardown()
Example #4
File: cli.py Project: CN-UPB/OpenBarista
def cli():
    main_parser = ThrowingArgumentParser(description=' DECaF CLI')
    main_parser.add_argument('-v', '--version', action='version',
                             version='%(prog)s ' + __version__ + ' (' + version_date + ')')

    subparsers = main_parser.add_subparsers(help='commands')

    call_parser = subparsers.add_parser('call', help='call a function on a component')
    call_parser.add_argument('--json', action='store_true', help='args specified in json')
    call_parser.add_argument('name', nargs=1, help='component name')
    call_parser.add_argument('function', nargs=1, help='function name')
    call_parser.add_argument('arguments', nargs="*", help='function arguments')
    call_parser.set_defaults(func=call)

    component_list_parser = subparsers.add_parser('component-list', help='list available components')

    component_list_parser.set_defaults(func=component_list)

    argcomplete.autocomplete(main_parser)

    try:
        args = main_parser.parse_args()
        result = args.func(args)
        if result is not None:
            result = 0

    except KeyboardInterrupt:
        print('Exiting decaf cli')
        result = -2
    except (SystemExit, ArgumentParserError):
        result = -3

    exit(result)
Example #5
    def parse(self, args):
        '''Parse the given arguments and return a tuple ``(command, args)``,
        where ``args`` is a list consisting of all arguments. The command can
        then be called as ``command(*args)``.

        :param args: The arguments to parse.'''
        try:
            # run completion handler before parsing
            import argcomplete
            argcomplete.autocomplete(self.parser)
        except ImportError:  # pragma: no cover
            # ignore error if not installed
            pass

        self._options = self.parser.parse_args(args)
        arg_map = self._options.__dict__
        if _DISPATCH_TO not in arg_map:  # pragma: no cover
            self.parser.error("too few arguments")

        command = arg_map.pop(_DISPATCH_TO)
        argspec = self._argspecs[command.__name__]
        real_args = []
        for arg in argspec.args:
            real_args.append(arg_map.pop(arg))
        if arg_map and arg_map.get(argspec.varargs):
            real_args.extend(arg_map.pop(argspec.varargs))
        return command, real_args
Example #6
def main():
    parser = setup_parser()
    argcomplete.autocomplete(parser)
    options = parser.parse_args()

    _setup_logger(options)

    # Support the deprecated -c option
    if getattr(options, 'config', None) is not None:
        options.configs.append(options.config)

    if options.subparser in ('report', 'logs', 'metrics', 'run'):
        _default_region(options)
        _default_account_id(options)

    try:
        command = options.command
        if not callable(command):
            command = getattr(
                importlib.import_module(command.rsplit('.', 1)[0]),
                command.rsplit('.', 1)[-1])

        # Set the process name to something cleaner
        process_name = [os.path.basename(sys.argv[0])]
        process_name.extend(sys.argv[1:])
        setproctitle(' '.join(process_name))
        command(options)
    except Exception:
        if not options.debug:
            raise
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
Example #7
 def autocomplete (self):
   try:
     import argcomplete
     argcomplete.autocomplete(self.parser)
   except ImportError:
     # no auto completion
     pass
Example #8
    def __init__(self):
        self.root_directory = os.getcwd()
        self.clowder = None
        self.clowder_repo = None
        self.versions = None
        self.group_names = ''
        self.project_names = ''
        self.branches = ''
        # Load current clowder.yml config if it exists
        clowder_path = os.path.join(self.root_directory, 'clowder')
        if os.path.isdir(clowder_path):
            self.clowder_repo = ClowderRepo(self.root_directory)
            self.clowder = ClowderController(self.root_directory)
            self.versions = self.clowder.get_fixed_version_names()
            self.branches = self.clowder_repo.branches()
            if self.clowder.get_all_group_names() is not None:
                self.group_names = self.clowder.get_all_group_names()
            if self.clowder.get_all_project_names() is not None:
                self.project_names = self.clowder.get_all_project_names()
        # clowder argparse setup
        command_description = 'Utility for managing multiple git repositories'
        parser = argparse.ArgumentParser(description=command_description)
        subparsers = parser.add_subparsers(dest='command')
        self._configure_subparsers(subparsers)
        # Argcomplete and arguments parsing
        argcomplete.autocomplete(parser)
        self.args = parser.parse_args()

        print('')
        if not hasattr(self, self.args.command):
            exit_unrecognized_command(parser)
        # use dispatch pattern to invoke method with same name
        getattr(self, self.args.command)()
        print('')
Example #9
    def create_parser(self, prog, parents=None):
        """
        Creates an ArgumentParser instance from options returned
        by get_options(), and a subparser for the given command.
        """
        prog = os.path.basename(prog)

        options_parser = argparse.ArgumentParser(add_help=False)
        for option in self.get_options():
            options_parser.add_argument(*option.args, **option.kwargs)

        # parser_parents = parents if parents else [option_parser]
        # parser_parents = [options_parser]

        parser = argparse.ArgumentParser(prog=prog, usage=self.usage,
                                         parents=[options_parser])

        subparsers = parser.add_subparsers()

        for name, command in self._commands.items():
            description = getattr(command, 'description', command.__doc__)
            command_parser = command.create_parser(name, parents=[options_parser])
            subparser = subparsers.add_parser(name, usage=description, help=description,
                                              parents=[command_parser], add_help=False)


        ## enable autocomplete only for parent parser when argcomplete is
        ## imported and it is NOT disabled in constructor
        if parents is None and ARGCOMPLETE_IMPORTED \
                and not self.disable_argcomplete:
            argcomplete.autocomplete(parser, always_complete_options=True)

        return parser
Example #10
def main():
    logger = logging.getLogger(__name__)
    console = logging.StreamHandler()
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    console.setFormatter(formatter)
    logger.addHandler(console)

    cmd_client = BaseClient(logger)

    parser, argv = config('Manage ConPaaS users.', logger)

    _user_cmd = UserCmd(parser, cmd_client)

    argcomplete.autocomplete(parser)
    args = parser.parse_args(argv)
    cmd_client.set_config(args.director_url, args.username, args.password,
                          args.debug)
    try:
        args.run_cmd(args)
    except Exception:
        if args.debug:
            traceback.print_exc()
        else:
            ex = sys.exc_info()[1]
            if str(ex).startswith("ERROR"):
                sys.stderr.write("%s\n" % ex)
            else:
                sys.stderr.write("ERROR: %s\n" % ex)
        sys.exit(1)
Example #11
File: config.py Project: uceo/uceo-2015
def try_argcomplete(parser):
    try:
        import argcomplete
    except ImportError:
        pass
    else:
        argcomplete.autocomplete(parser)
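
The try_argcomplete() helper above is a compact way to keep argcomplete optional; a sketch of how it might be reused (the program name and option are illustrative):

import argparse

parser = argparse.ArgumentParser(prog='devpi')
parser.add_argument('--clientdir', help='directory for client state')
try_argcomplete(parser)  # silently does nothing when argcomplete is missing
args = parser.parse_args()
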
Example #12
File: app.py Project: Varkal/chuda
    def __init_arguments(self):
        self.parser = argparse.ArgumentParser(
            prog=self.app_name,
            description=self.description
        )

        if not self.override_default_arguments:
            self.arguments = self.default_arguments + self.arguments

        if not self.merge_arguments_in_subcommands or not self.subcommands:
            for argument in self.arguments:
                arg_tuple = argument.convert_to_argument()
                if isinstance(arg_tuple[0], list):
                    parg = self.parser.add_argument(*arg_tuple[0], **arg_tuple[1])
                else:
                    parg = self.parser.add_argument(arg_tuple[0], **arg_tuple[1])

                if argument.completer:
                    parg.completer = argument.completer

        subcommands_dict = {}
        if self.subcommands:
            subparsers = self.parser.add_subparsers(
                title="subcommands"
            )

        for subcommand in self.subcommands:
            instance = subcommand()
            subcommands_dict[instance.command_name] = instance
            subparser = subparsers.add_parser(
                instance.command_name,
                help=getattr(instance, "description", ""),
                description=getattr(instance, "description", ""),
            )
            subparser.set_defaults(command=instance.command_name)
            if self.merge_arguments_in_subcommands and instance.merge_parent_arguments:
                instance.arguments = self.arguments + instance.arguments

            for argument in instance.arguments:
                arg_tuple = argument.convert_to_argument()
                if isinstance(arg_tuple[0], list):
                    sp_arg = subparser.add_argument(*arg_tuple[0], **arg_tuple[1])
                else:
                    sp_arg = subparser.add_argument(arg_tuple[0], **arg_tuple[1])

                if argument.completer:
                    sp_arg.completer = argument.completer

        argcomplete.autocomplete(
            argument_parser=self.parser,
            always_complete_options=False
        )

        self.arguments_declaration = self.arguments
        self.arguments = self.parser.parse_args()

        if getattr(self.arguments, "command", None) is None:
            setattr(self.arguments, "command", "main")

        self.subcommands = subcommands_dict
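
Example #12 forwards each argument's completer to the action object returned by add_argument(); assigning to that action's .completer attribute is argcomplete's documented hook for per-argument completion. A minimal standalone sketch of the same idea (names and choices are illustrative):

import argparse

import argcomplete


def env_completer(prefix, parsed_args, **kwargs):
    # offer only the environments that match what has been typed so far
    return [env for env in ('dev', 'staging', 'prod') if env.startswith(prefix)]


parser = argparse.ArgumentParser(prog='deploy')
parser.add_argument('--env', help='target environment').completer = env_completer
argcomplete.autocomplete(parser, always_complete_options=False)
args = parser.parse_args()
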
Example #13
File: cli.py Project: abrt/abrt
def main():
    l18n.init()

    parser = ArghParser()
    parser.add_argument('-a', '--auth',
                        action='store_true',
                        help=_('Authenticate and show all problems'
                               ' on this machine'))

    parser.add_argument('-v', '--version',
                        action='version',
                        version=config.VERSION)

    parser.add_commands([
        backtrace,
        di_install,
        gdb,
        info,
        list_problems,
        remove,
        report,
        retrace,
        status,
    ])

    argcomplete.autocomplete(parser)

    try:
        parser.dispatch()
    except KeyboardInterrupt:
        sys.exit(1)

    sys.exit(0)
Example #14
def _parse_args(args):
    """
    Parses the arguments using the Python argparse library.
    Generates shell autocomplete using the argcomplete library.

    :param list args: arguments from cli
    :rtype: argparse.Namespace
    """

    parser = register_commands()
    argcomplete.autocomplete(parser)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    parsed = parser.parse_args(args)
    if parsed.debug:
        global_verbosity_level = HIGH_VERBOSE
    else:
        global_verbosity_level = parsed.verbosity
    set_global_verbosity_level(global_verbosity_level)
    if global_verbosity_level >= HIGH_VERBOSE:
        set_debug()
    return parsed
Example #15
File: depend.py Project: xenserver/planex
def parse_args_or_exit(argv=None):
    """
    Parse command line options
    """
    parser = argparse.ArgumentParser(
        description="Generate Makefile dependencies from RPM Spec files",
        parents=[common_base_parser(), rpm_define_parser()])
    parser.add_argument("specs", metavar="SPEC", nargs="+", help="spec file")
    parser.add_argument(
        "--no-package-name-check", dest="check_package_names",
        action="store_false", default=True,
        help="Don't check that package name matches spec file name")
    parser.add_argument(
        "--no-buildrequires", dest="buildrequires",
        action="store_false", default=True,
        help="Don't generate dependency rules for BuildRequires")
    parser.add_argument(
        "--no-requires", dest="requires",
        action="store_false", default=True,
        help="Don't generate dependency rules for Requires")
    parser.add_argument(
        "--json", action="store_true",
        help="Output the dependency rules as a json object"
    )
    argcomplete.autocomplete(parser)
    return parser.parse_args(argv)
Example #16
def run():
    # Parse arguments
    parser = build_parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not vars(args).get('func'):
        parser.print_help()
    else:
        # Add custom plugin dir to import path
        custom_plugins_dir = vars(args).get('custom_plugins_dir')
        if custom_plugins_dir:
            sys.path.append(custom_plugins_dir)

        # Resolve default ip if the --ip argument hasn't been specified
        if not vars(args).get('ip'):
            # Returns None if an error has occurred
            args.ip = host.resolve_default_ip()
            if not args.ip:
                return
        else:
            args.local_connection = False

        args.cli_parameters = get_cli_parameters(args)
        args.custom_settings = get_custom_settings(args)
        logging_setup.configure_logging(args)

        is_completed_without_error = args.func(args)
        if not is_completed_without_error:
            exit(1)
Example #17
File: manifest.py Project: euanh/planex
def parse_args_or_exit(argv=None):
    """Parse command line options"""

    parser = argparse.ArgumentParser(
        description='Generate manifest in JSON format from spec/link files',
        parents=[common_base_parser()]
    )

    parser.add_argument(
        'specfile_path',
        metavar='SPEC',
        help='path/to/<spec_file>'
    )

    parser.add_argument(
        'lnkfile_path',
        metavar='LNK',
        nargs='?',
        default=None,
        help='path/to/<lnk_file>'
    )

    parser.add_argument(
        '--pins-dir',
        dest='pinsdir',
        default="PINS",
        help='path/to/pins'
    )

    argcomplete.autocomplete(parser)
    return parser.parse_args(argv)
Example #18
File: cli.py Project: Ofterdingen/textract
def get_parser():
    """Initialize the parser for the command line interface and bind the
    autocompletion functionality"""

    # initialize the parser
    parser = argparse.ArgumentParser(
        description=(
            'Command line tool for extracting text from any document. '
        ) % locals(),
    )

    # define the command line options here
    parser.add_argument(
        'filename', help='Filename to extract text.',
    ).completer = argcomplete.completers.FilesCompleter
    parser.add_argument(
        '-o', '--output', type=argparse.FileType('w'), default='-',
        help='output raw text in this file',
    )
    parser.add_argument(
        '-m', '--method', default='',
        help='specify a method of extraction for formats that support it',
    )
    parser.add_argument(
        '-v', '--version', action='version', version='%(prog)s '+VERSION,
    )

    # enable autocompletion with argcomplete
    argcomplete.autocomplete(parser)

    return parser
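
Example #18 attaches argcomplete's bundled FilesCompleter to the positional filename argument. A hedged variant that instantiates the completer and restricts suggestions to a few extensions (the extension list is only an example):

import argparse

import argcomplete
from argcomplete.completers import FilesCompleter

parser = argparse.ArgumentParser(prog='extract')
parser.add_argument(
    'filename', help='file to extract text from',
).completer = FilesCompleter(allowednames=('.pdf', '.docx', '.txt'))
argcomplete.autocomplete(parser)
args = parser.parse_args()
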
Example #19
	def __init__(self, name, description, commandLineArgs=[]):
		self.parser = argparse.ArgumentParser(description)
		self.parser.add_argument(
			"-l", "--loglevel",
			dest="loglevel",
		    help="Log level. 5=only critical, 4=errors, 3=warnings, 2=info, 1=debug. Setting this to 1 will print a lot! Default=3.)",
		    type=int,
		    choices=(1,2,3,4,5),
		    default=3 )
		self.parser.add_argument(
			"-d", "--dry",
			dest="dryrun",
			action='store_true',
		    help="Dry run. Do not upload any files, or put stuff in databases.")
		for c in commandLineArgs:
			self.parser.add_argument(
				c.pop("short",None),
				c.pop("long",None),
				**c)

		argcomplete.autocomplete(self.parser)
		self.args = self.parser.parse_args()

		self.logger = logging.getLogger(name)
		self.logger.setLevel(self.args.loglevel * 10) #https://docs.python.org/2/library/logging.html#levels

		self.executionMode = self.NORMAL_MODE
		if self.args.dryrun:
			self.logger.info("Running in dry mode")
			self.executionMode = self.DRY_MODE
Example #20
def parse_args(prog, args):
    parser = argparse.ArgumentParser(prog=prog, description=DESCRIPTION)
    arg = parser.add_argument('template_dir', metavar='TEMPLATE_DIR',
                              help='The directory containing the HTML5 '
                                   'Boilerplate git checkout.')
    if argcomplete:
        arg.completer = directory_completer
    parser.add_argument('output_filename', metavar='OUTPUT_FILENAME',
                        help='The path where the Jinja template should be '
                             'written.')
    parser.add_argument('--google-analytics', action='store_true',
                        default=False,
                        help='Replace fake Google Analytics '
                             'ID with Flask config variable.')
    parser.add_argument('--vim', action='store_true', default=False,
                        help='Add vim modeline filetype hint.')
    parser.add_argument('--webassets', action='store_true', default=False,
                        help='Manage CSS/JS assets with webassets.')
    parser.add_argument('--no-charset-tag', action='store_true', default=False,
                        help='Remove the meta charset= tag (you would need to '
                             'use the charset param in the Content-Type '
                             'header instead).')
    parser.add_argument('--no-compat-tag', action='store_true', default=False,
                        help='Remove the X-UA-Compatible meta tag (you would '
                             'need to use the HTTP header instead).')
    if argcomplete:
        argcomplete.autocomplete(parser)
    return parser.parse_args(args)
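
Example #20 assumes a module-level import guard, so argcomplete may be None, and directory_completer is some callable defined elsewhere. A sketch of what that guard and a simple directory completer could look like (the completer body is an assumption, not the project's code):

import os

try:
    import argcomplete
except ImportError:
    argcomplete = None


def directory_completer(prefix, **kwargs):
    # suggest sub-directories of the current directory matching the typed prefix
    return [name for name in os.listdir('.')
            if name.startswith(prefix) and os.path.isdir(name)]
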
Example #21
File: depend.py Project: lcy985a/planex
def parse_cmdline():
    """
    Parse command line options
    """
    parser = argparse.ArgumentParser(
        description="Generate Makefile dependencies from RPM Spec files")
    add_common_parser_options(parser)
    parser.add_argument("specs", metavar="SPEC", nargs="+", help="spec file")
    parser.add_argument(
        "-i", "--ignore", metavar="PKG", action="append", default=[],
        help="package name to ignore")
    parser.add_argument(
        "-I", "--ignore-from", metavar="FILE", action="append", default=[],
        help="file of package names to be ignored")
    parser.add_argument(
        "-P", "--pins-dir", help="Directory containing pin overlays")
    parser.add_argument(
        "-d", "--dist", metavar="DIST", default="",
        help="distribution tag (used in RPM filenames)")
    parser.add_argument(
        "-r", "--repos_path", metavar="DIR", default="repos",
        help='Local path to the repositories')
    parser.add_argument(
        "-p", "--packaging", metavar="PACKAGING",
        choices=["rpm", "deb"], default=build_type(),
        help='Packaging to use (rpm or deb): default %s' % build_type())
    parser.add_argument(
        "--no-package-name-check", dest="check_package_names",
        action="store_false", default=True,
        help="Don't check that package name matches spec file name")
    parser.add_argument(
        "-t", "--topdir", metavar="DIR", default=None,
        help='Set rpmbuild toplevel directory')
    argcomplete.autocomplete(parser)
    return parser.parse_args()
Example #22
File: makesrpm.py Project: srowe/planex
def parse_args_or_exit(argv=None):
    """
    Parse command line options
    """
    parser = argparse.ArgumentParser(
        description='Pack sources and patchqueues into a source RPM',
        parents=[planex.cmd.args.common_base_parser(),
                 planex.cmd.args.rpm_define_parser(),
                 planex.cmd.args.keeptmp_parser()])
    parser.add_argument("spec", metavar="SPEC", help="Spec file")
    parser.add_argument("sources", metavar="SOURCE/PATCHQUEUE", nargs='*',
                        help="Source and patchqueue files")
    parser.add_argument("--metadata", dest="metadata",
                        action="store_true",
                        help="Add inline comments in the spec file "
                        "to specify what provided sources, patches "
                        "and patchqueues")
    argcomplete.autocomplete(parser)

    # mirror parse_args' fallback to sys.argv so the list comprehension below
    # does not fail when argv is None
    if argv is None:
        argv = sys.argv[1:]
    parsed_args = parser.parse_args(argv)
    links = [arg for arg in argv
             if arg.endswith(".lnk") or arg.endswith(".pin")]
    parsed_args.link = None
    if links:
        parsed_args.link = Link(links[0])

    return parsed_args
Example #23
File: leo.py Project: sedrubal/leo
def parse_args():
    """
    Parse arguments
    :return: the parsed arguments
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('words',
                        action='store',
                        nargs='+',
                        metavar='word',
                        type=str,
                        help="the words you want to translate")
    parser.add_argument('-l', '--lang',
                        action='store',
                        dest='language',
                        metavar='lang',
                        type=str,
                        default='en',
                        choices=['en', 'fr', 'es', 'it',
                                 'ch', 'ru', 'pt', 'pl'],
                        help="the languagecode to translate to or from "
                             "(en, fr, es, it, ch, ru, pt, pl)")
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s {ver}'.format(ver=__version__))

    if 'argcomplete' in globals():
        argcomplete.autocomplete(parser)

    args = parser.parse_args()

    return args
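
Example #23 checks 'argcomplete' in globals(), which implies a module-level import attempt along these lines (a sketch, not the project's exact code):

try:
    import argcomplete
except ImportError:
    pass  # leaves the name undefined, hence the globals() check above
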
Example #24
File: main.py Project: ovorobio/InfraRed
def main(args=None):
    CoreServices.setup()

    # inject existing libraries;
    # because of this, all the ansible modules should be imported after this point
    CoreServices.dependency_manager().inject_libraries()

    specs_manager = api.SpecManager()

    # Init Managers
    specs_manager.register_spec(
        WorkspaceManagerSpec('workspace',
                             description="Workspace manager. "
                                         "Allows to create and use an "
                                         "isolated environment for plugins "
                                         "execution."))
    specs_manager.register_spec(
        PluginManagerSpec('plugin',
                          description="Plugin management"))

    specs_manager.register_spec(
        SSHSpec(
            'ssh',
            description="Interactive ssh session to node from inventory."))

    # register all plugins
    for plugin in CoreServices.plugins_manager().PLUGINS_DICT.values():
        specs_manager.register_spec(api.InfraredPluginsSpec(plugin))

    argcomplete.autocomplete(specs_manager.parser)
    return specs_manager.run_specs(args) or 0
Example #25
File: cli.py Project: decathorpe/kentauros
    def __init__(self):
        if self.parser is None:
            CLIArgs.parser = get_cli_parser(get_cli_parser_base())
            argcomplete.autocomplete(self.parser)

        if self.args is None:
            CLIArgs.args = self.parser.parse_args()
Example #26
def parse_args(prog, args):
    parser = argparse.ArgumentParser(prog)
    parser.add_argument('--redmine-version', default=DEFAULT_RM_VERSION,
                        metavar='VERSION',
                        help='The version of Redmine to test against')
    parser.add_argument('--use-existing-redmine-install', action='store_false',
                        default=True, dest='redmine_setup_needed',
                        help='Use the existing Redmine install instead of '
                             'recreating it')
    parser.add_argument('--redmine-download-method', default='TGZ',
                        choices=['TGZ', 'SVN'],
                        help='The method that the Redmine code is downloaded')
    parser.add_argument('--redmine-port', type=int, default=DEFAULT_PORT,
                        help='The TCP port Redmine will listen on')
    parser.add_argument('--redmine-port-random', action='store_true',
                        default=False,
                        help='Randomly sets the Redmine TCP port '
                             '(overrides --redmine-port)')
    parser.add_argument('--no-flake8', action='store_false', default=True,
                        dest='flake8', help='Disable Flake8 check')
    parser.add_argument('--check-rvm-ruby', action='store_true', default=False,
                        help='Installs the proper RVM Ruby version if not '
                             'already installed')
    parser.add_argument('--workspace', metavar='DIR', default='/tmp',
                        help='Directory where Redmine directory is located')
    parser.add_argument('--download-cache', metavar='DIR', default='/tmp',
                        help='Directory where tarballs/gems are cached')
    parser.add_argument('--run-coverage', action='store_true', default=False,
                        help='Run coverage.py on the testsuite')

    if argcomplete:
        argcomplete.autocomplete(parser)
    return parser.parse_args(args)
Example #27
def main():
    """ Main function. """
    mesg = """This script installs locally c libs.

    choice      description

    C++ Libraries
    ------------------------------------------------------
    cppunit     Install the cppunit library.
    gtest       Install the gtest library.
    boost       Install latest boost dev library.
    jsonrpc     Install jsonrpc-cpp library. Beta lib & build not working atm.
    jsoncpp     Install json parsing library.
    SDL         Install the SDL1.xx game library.
    SDL2        Install the SDL2.xx game library.

    C Libraries
    ------------------------------------------------------
    argtable    Install args parsing library.
    cunit       Install the cunit test library.
    gnump       Install the GNU multiprecision library.
    jansson     Install a json parsing library.
    libuv       Install an async IO library.
    libxml      Install a xml parsing library.
    """
    parser = argparse.ArgumentParser(description=mesg,
                                     formatter_class=argparse.
                                     RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--ldir', nargs='?', default='./libs',
                        help='library directory to install to')
    parser.add_argument('libs', nargs='+', help='libs selected for install',
                        choices=sorted(BUILDS.keys(), key=str.lower))

    autocomplete(parser)
    args = parser.parse_args()  # Default parses argv[1:]
    ldir = os.path.abspath(args.ldir)

    builds = []
    actions = {}
    for key in BUILDS.keys():
        actions[key] = functools.partial(builds.append, BUILDS[key])

    try:
        # Need this for jam to build mpi & graph_parallel.
        config = os.path.expanduser('~') + os.sep + 'user-config.jam'
        with open(config, 'w') as f_conf:
            f_conf.write('using mpi ;')

        for lib in args.libs:
            actions[lib]()

        # Multiprocess to overlap builds
        build_pool(builds, ldir)
    finally:
        try:
            os.remove(config)
            os.removedirs(ldir + os.path.sep + 'src')
            os.removedirs(ldir)
        except OSError:
            pass
Example #28
def parseCommandLine(config):
    parser = buildCommandLineParser(config)

    args = None
    args_extra = None

    # parse known commands printing general usage on any error
    try:
        argcomplete.autocomplete(parser)
        args, args_extra = parser.parse_known_args()

    except:
        # TODO: determine best help instead of general
        args = argparse.Namespace()

        if len(sys.argv) > 1:
            args.command = sys.argv[1]

        if len(sys.argv) > 2:
            args.action = sys.argv[2]

        args.host = config.get("core", "host", CANVAS_HOST)
        args.username = config.get("user", "name", CANVAS_USER)
        args.help = True

    return (args, args_extra)
Example #29
def main():
    parser = setup_parser()
    argcomplete.autocomplete(parser)
    options = parser.parse_args()
    if options.subparser is None:
        parser.print_help(file=sys.stderr)
        return sys.exit(2)

    _setup_logger(options)

    # Support the deprecated -c option
    if getattr(options, 'config', None) is not None:
        options.configs.append(options.config)

    config = Config.empty(**vars(options))

    try:
        command = options.command
        if not callable(command):
            command = getattr(
                importlib.import_module(command.rsplit('.', 1)[0]),
                command.rsplit('.', 1)[-1])

        # Set the process name to something cleaner
        process_name = [os.path.basename(sys.argv[0])]
        process_name.extend(sys.argv[1:])
        setproctitle(' '.join(process_name))
        command(config)
    except Exception:
        if not options.debug:
            raise
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
Example #30
def main(argsraw=None):
    """Show a list of available artistools commands."""
    import argcomplete
    import argparse
    import importlib

    parser = argparse.ArgumentParser()
    parser.set_defaults(func=None)

    subparsers = parser.add_subparsers(dest='subcommand')
    subparsers.required = False

    for command, (submodulename, funcname) in sorted(commandlist.items()):
        submodule = importlib.import_module(submodulename, package='artistools')
        subparser = subparsers.add_parser(command)
        submodule.addargs(subparser)
        subparser.set_defaults(func=getattr(submodule, funcname))

    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if args.func is not None:
        args.func(args=args)
    else:
        # parser.print_help()
        print('artistools provides the following commands:\n')

        # for script in sorted(console_scripts):
        #     command = script.split('=')[0].strip()
        #     print(f'  {command}')

        for command in commandlist:
            print(f'  {command}')
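
As in Example #30, calling autocomplete() on a parser that owns subparsers also completes the subcommand names themselves; a minimal sketch (program and subcommand names are illustrative):

import argparse

import argcomplete

parser = argparse.ArgumentParser(prog='artis')
subparsers = parser.add_subparsers(dest='subcommand')
plot = subparsers.add_parser('plotspectra')
plot.add_argument('--xmax', type=float)

argcomplete.autocomplete(parser)  # completes 'plotspectra' and its options
args = parser.parse_args()
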
Example #31
    def parseOptions(self, argv=None):
        """parse the command line options"""

        if argv is None:
            self._argv = list(sys.argv)

        parser = argparse.ArgumentParser(
            "Running DD4hep Simulations:",
            formatter_class=argparse.RawTextHelpFormatter)

        parser.add_argument("--steeringFile",
                            "-S",
                            action="store",
                            default=self.steeringFile,
                            help="Steering file to change default behaviour")

        # first we parse just the steering file, but only if we don't want to see the help message
        if not any(opt in self._argv for opt in ('-h', '--help')):
            parsed, _unknown = parser.parse_known_args()
            self.steeringFile = parsed.steeringFile
            self.readSteeringFile()

        # readSteeringFile will set self._argv to None if there is a steering file
        if self._argv is None:
            self._argv = list(argv) if argv else list(sys.argv)

        parser.add_argument("--compactFile",
                            action="store",
                            default=self.compactFile,
                            help="The compact XML file")

        parser.add_argument(
            "--runType",
            action="store",
            choices=("batch", "vis", "run", "shell"),
            default=self.runType,
            help=
            "The type of action to do in this invocation"  # Note: implicit string concatenation
            "\nbatch: just simulate some events, needs numberOfEvents, and input file or gun"
            "\nvis: enable visualisation, run the macroFile if it is set"
            "\nrun: run the macroFile and exit"
            "\nshell: enable interactive session")

        parser.add_argument(
            "--inputFiles",
            "-I",
            nargs='+',
            action="store",
            default=self.inputFiles,
            help="InputFiles for simulation %s files are supported" %
            ", ".join(POSSIBLEINPUTFILES))

        parser.add_argument(
            "--outputFile",
            "-O",
            action="store",
            default=self.outputFile,
            help="Outputfile from the simulation: .slcio, edm4hep.root and .root"
            " output files are supported")

        parser.add_argument(
            "-v",
            "--printLevel",
            action="store",
            default=self.printLevel,
            dest="printLevel",
            choices=(1, 2, 3, 4, 5, 6, 7, 'VERBOSE', 'DEBUG', 'INFO',
                     'WARNING', 'ERROR', 'FATAL', 'ALWAYS'),
            help="Verbosity use integers from 1(most) to 7(least) verbose"
            "\nor strings: VERBOSE, DEBUG, INFO, WARNING, ERROR, FATAL, ALWAYS"
        )

        parser.add_argument(
            "--numberOfEvents",
            "-N",
            action="store",
            dest="numberOfEvents",
            default=self.numberOfEvents,
            type=int,
            help="number of events to simulate, used in batch mode")

        parser.add_argument("--skipNEvents",
                            action="store",
                            dest="skipNEvents",
                            default=self.skipNEvents,
                            type=int,
                            help="Skip first N events when reading a file")

        parser.add_argument("--physicsList",
                            action="store",
                            dest="physicsList",
                            default=self.physicsList,
                            help="Physics list to use in simulation")

        parser.add_argument(
            "--crossingAngleBoost",
            action="store",
            dest="crossingAngleBoost",
            default=self.crossingAngleBoost,
            type=float,
            help="Lorentz boost for the crossing angle, in radian!")

        parser.add_argument(
            "--vertexSigma",
            nargs=4,
            action="store",
            dest="vertexSigma",
            default=self.vertexSigma,
            metavar=('X', 'Y', 'Z', 'T'),
            type=float,
            help=
            "FourVector of the Sigma for the Smearing of the Vertex position: x y z t"
        )

        parser.add_argument(
            "--vertexOffset",
            nargs=4,
            action="store",
            dest="vertexOffset",
            default=self.vertexOffset,
            metavar=('X', 'Y', 'Z', 'T'),
            type=float,
            help=
            "FourVector of translation for the Smearing of the Vertex position: x y z t"
        )

        parser.add_argument(
            "--macroFile",
            "-M",
            action="store",
            dest="macroFile",
            default=self.macroFile,
            help="Macro file to execute for runType 'run' or 'vis'")

        parser.add_argument("--enableGun",
                            "-G",
                            action="store_true",
                            dest="enableGun",
                            default=self.enableGun,
                            help="enable the DDG4 particle gun")

        parser.add_argument(
            "--enableG4GPS",
            action="store_true",
            dest="enableG4GPS",
            default=self.enableG4GPS,
            help=
            "enable the Geant4 GeneralParticleSource. Needs a macroFile (runType run)"
            "or use it with the shell (runType shell)")

        parser.add_argument(
            "--enableG4Gun",
            action="store_true",
            dest="enableG4Gun",
            default=self.enableG4Gun,
            help=
            "enable the Geant4 particle gun. Needs a macroFile (runType run)"
            " or use it with the shell (runType shell)")

        parser.add_argument("--dumpParameter",
                            "--dump",
                            action="store_true",
                            dest="dumpParameter",
                            default=self._dumpParameter,
                            help="Print all configuration Parameters and exit")

        parser.add_argument("--enableDetailedShowerMode",
                            action="store_true",
                            dest="enableDetailedShowerMode",
                            default=self.enableDetailedShowerMode,
                            help="use detailed shower mode")

        parser.add_argument("--dumpSteeringFile",
                            action="store_true",
                            dest="dumpSteeringFile",
                            default=self._dumpSteeringFile,
                            help="print an example steering file to stdout")

        # output, or do something smarter with fullHelp only for example
        ConfigHelper.addAllHelper(self, parser)
        # now parse everything. The default values are now taken from the
        # steeringFile if they were set so that the steering file parameters can be
        # overwritten from the command line
        if ARGCOMPLETEENABLED:
            argcomplete.autocomplete(parser)
        parsed = parser.parse_args()

        self._dumpParameter = parsed.dumpParameter
        self._dumpSteeringFile = parsed.dumpSteeringFile

        self.compactFile = parsed.compactFile
        self.inputFiles = parsed.inputFiles
        self.inputFiles = self.__checkFileFormat(self.inputFiles,
                                                 POSSIBLEINPUTFILES)
        self.outputFile = parsed.outputFile
        self.__checkFileFormat(self.outputFile, ('.root', '.slcio'))
        self.runType = parsed.runType
        self.printLevel = self.__checkOutputLevel(parsed.printLevel)

        self.numberOfEvents = parsed.numberOfEvents
        self.skipNEvents = parsed.skipNEvents
        self.physicsList = parsed.physicsList
        self.crossingAngleBoost = parsed.crossingAngleBoost
        self.macroFile = parsed.macroFile
        self.enableGun = parsed.enableGun
        self.enableG4Gun = parsed.enableG4Gun
        self.enableG4GPS = parsed.enableG4GPS
        self.enableDetailedShowerMode = parsed.enableDetailedShowerMode
        self.vertexOffset = parsed.vertexOffset
        self.vertexSigma = parsed.vertexSigma

        self._consistencyChecks()

        # self.__treatUnknownArgs( parsed, unknown )
        self.__parseAllHelper(parsed)
        if self._errorMessages and not (self._dumpParameter
                                        or self._dumpSteeringFile):
            parser.epilog = "\n".join(self._errorMessages)
            parser.print_help()
            exit(1)

        if self._dumpParameter:
            from pprint import pprint
            logger.info("=" * 80)
            pprint(vars(self))
            logger.info("=" * 80)
            exit(1)

        if self._dumpSteeringFile:
            self.__printSteeringFile(parser)
            exit(1)
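
Example #31 gates completion on an ARGCOMPLETEENABLED flag; such a flag is typically derived once at import time, along the lines of this sketch:

try:
    import argcomplete
    ARGCOMPLETEENABLED = True
except ImportError:
    argcomplete = None
    ARGCOMPLETEENABLED = False
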
Example #32
    def __init__(self):

        # {{{ colorama.init()

        try:
            colorama.init()
        except Exception:
            pass

        # }}}

        # {{{ Running in debug/release mode

        _print_("Running in", end=' ')
        if __debug__:
            print("debug mode")
        else:
            print("release mode")

        # }}}

        # {{{ Arguments handling

        parser = argparse.ArgumentParser(
            description=
            'This is the splitter node of a P2PSP team.  The splitter is in charge of defining the Set of Rules (SoR) that will control the team. By default, DBS (unicast transmissions) will be used.'
        )
        #parser.add_argument('--splitter_addr', help='IP address to serve (TCP) the peers. (Default = "{}")'.format(Splitter_IMS.SPLITTER_ADDR)) <- no ahora
        parser.add_argument(
            '--buffer_size',
            help='size of the video buffer in blocks. Default = {}.'.format(
                Splitter_IMS.BUFFER_SIZE))
        parser.add_argument(
            '--channel',
            help=
            'Name of the channel served by the streaming source. Default = "{}".'
            .format(Splitter_IMS.CHANNEL))
        parser.add_argument('--chunk_size',
                            help='Chunk size in bytes. Default = {}.'.format(
                                Splitter_IMS.CHUNK_SIZE))
        parser.add_argument(
            '--header_size',
            help='Size of the header of the stream in chunks. Default = {}.'.
            format(Splitter_IMS.HEADER_SIZE))
        parser.add_argument(
            '--max_chunk_loss',
            help=
            'Maximum number of lost chunks for an unsupportive peer. Makes sense only in unicast mode. Default = {}.'
            .format(Splitter_DBS.MAX_CHUNK_LOSS))
        parser.add_argument(
            '--max_number_of_monitor_peers',
            help=
            'Maximum number of monitors in the team. The first connecting peers will automatically become monitors. Default = "{}".'
            .format(Splitter_DBS.MONITOR_NUMBER))
        parser.add_argument(
            '--mcast_addr',
            help=
            'IP multicast address used to serve the chunks. Makes sense only in multicast mode. Default = "{}".'
            .format(Splitter_IMS.MCAST_ADDR))
        parser.add_argument(
            '--port',
            help='Port to serve the peers. Default = "{}".'.format(
                Splitter_IMS.PORT))
        parser.add_argument(
            '--source_addr',
            help=
            'IP address or hostname of the streaming server. Default = "{}".'.
            format(Splitter_IMS.SOURCE_ADDR))
        parser.add_argument(
            '--source_port',
            help='Port where the streaming server is listening. Default = {}.'.
            format(Splitter_IMS.SOURCE_PORT))
        parser.add_argument(
            "--IMS",
            action="store_true",
            help=
            "Uses the IP multicast infrastructure, if available. IMS mode is incompatible with ACS, LRS, DIS and NTS modes."
        )
        parser.add_argument("--NTS",
                            action="store_true",
                            help="Enables NAT traversal.")
        parser.add_argument("--ACS",
                            action="store_true",
                            help="Enables Adaptive Chunk-rate.")
        parser.add_argument("--LRS",
                            action="store_true",
                            help="Enables Lost chunk Recovery.")
        parser.add_argument("--DIS",
                            action="store_true",
                            help="Enables Data Integrity check.")
        parser.add_argument('--strpe',
                            nargs='+',
                            type=str,
                            help='Selects STrPe model for DIS')
        parser.add_argument('--strpeds',
                            nargs='+',
                            type=str,
                            help='Selects STrPe-DS model for DIS')
        parser.add_argument(
            '--strpeds_majority_decision',
            help='Sets majority decision ratio for STrPe-DS model.')
        parser.add_argument(
            '--strpe_log',
            help='Logging STrPe & STrPe-DS specific data to file.')
        parser.add_argument(
            '--TTL',
            help='Time To Live of the multicast messages. Default = {}.'.
            format(Splitter_IMS.TTL))

        try:
            argcomplete.autocomplete(parser)
        except Exception:
            pass
        args = parser.parse_args()
        #args = parser.parse_known_args()[0]

        if args.buffer_size:
            Splitter_IMS.BUFFER_SIZE = int(args.buffer_size)
        _print_("Buffer size =", Splitter_IMS.BUFFER_SIZE)

        if args.channel:
            Splitter_IMS.CHANNEL = args.channel
        _print_("Channel = \"" + Splitter_IMS.CHANNEL + "\"")

        if args.chunk_size:
            Splitter_IMS.CHUNK_SIZE = int(args.chunk_size)
        _print_("Chunk size =", Splitter_IMS.CHUNK_SIZE)

        if args.header_size:
            Splitter_IMS.HEADER_SIZE = int(args.header_size)
        _print_("Header size =", Splitter_IMS.HEADER_SIZE)

        if args.port:
            Splitter_IMS.PORT = int(args.port)
        _print_("Listening port =", Splitter_IMS.PORT)

        if args.source_addr:
            Splitter_IMS.SOURCE_ADDR = socket.gethostbyname(args.source_addr)
        _print_("Source address = ", Splitter_IMS.SOURCE_ADDR)

        if args.source_port:
            Splitter_IMS.SOURCE_PORT = int(args.source_port)
        _print_("Source port =", Splitter_IMS.SOURCE_PORT)

        if args.IMS:
            _print_("IP multicast (IMS) mode selected")

            if args.mcast_addr:
                Splitter_IMS.MCAST_ADDR = args.mcast_addr
            _print_("Multicast address =", Splitter_IMS.MCAST_ADDR)

            if args.TTL:
                Splitter_IMS.TTL = args.TTL
            _print_("Multicast TTL =", Splitter_IMS.TTL)

            splitter = Splitter_IMS()
            splitter.peer_list = []  # No peer_list is used in IMS.

        else:
            _print_("IP unicast mode selected")

            if args.max_chunk_loss:
                Splitter_DBS.MAX_CHUNK_LOSS = int(args.max_chunk_loss)
            _print_("Maximun chunk loss =", Splitter_DBS.MAX_CHUNK_LOSS)

            if args.max_number_of_monitor_peers:
                Splitter_DBS.MONITOR_NUMBER = int(args.max_number_of_monitor_peers)
            _print_("Maximum number of monitor peers =",
                    Splitter_DBS.MONITOR_NUMBER)

            splitter = Splitter_DBS()
            if args.NTS:
                from splitter_nts import Splitter_NTS
                splitter = Splitter_NTS(splitter)
                _print_("NTS enabled")
            if args.ACS:
                splitter = Splitter_ACS(splitter)
                _print_("ACS enabled")
            if args.LRS:
                from splitter_lrs import Splitter_LRS
                splitter = Splitter_LRS(splitter)
                _print_("LRS enabled")
            if args.DIS:
                from splitter_strpe import StrpeSplitter
                from splitter_strpeds import StrpeDsSplitter
                _print_("DIS enabled")
                if args.strpe:
                    splitter = StrpeSplitter(splitter)
                    print("strpe mode selected")
                    for peer in args.strpe:
                        splitter.add_trusted_peer(peer)
                if args.strpeds:
                    splitter = StrpeDsSplitter(splitter)
                    _print_("strpeds mode selected")
                    for peer in args.strpeds:
                        splitter.add_trusted_peer(peer)
                    if args.strpeds_majority_decision:
                        _print_("strpeds_majority_decision mode selected")
                        splitter = Splitter_strpeds_majority_decision(splitter)
                        splitter.setMajorityRatio(
                            float(args.strpeds_majority_decision))
                if args.strpe_log:
                    splitter.LOGGING = True
                    splitter.LOG_FILE = open(args.strpe_log, 'w', 0)

            #splitter = Splitter_ACS()


#            if (args.strpe):
#                splitter = self.init_strpe_splitter('strpe', args.strpe, args.strpe_log)
#            elif (args.strpeds):
#                splitter = self.init_strpe_splitter('strpeds', args.strpeds, args.strpe_log)
#                if args.strpeds_majority_decision:
#                    splitter.setMajorityRatio(float(args.strpeds_majority_decision))
#            else:
#                splitter = Splitter_LRS()

# }}}

# {{{ Run!

        splitter.start()

        # {{{ Prints information until keyboard interruption

        print("         | Received  | Sent      | Number       losses/ losses")
        print(
            "    Time | (kbps)    | (kbps)    | peers (peer) sents   threshold period kbps"
        )
        print(
            "---------+-----------+-----------+-----------------------------------..."
        )

        last_sendto_counter = splitter.sendto_counter
        last_recvfrom_counter = splitter.recvfrom_counter

        while splitter.alive:
            try:
                time.sleep(1)
                chunks_sendto = splitter.sendto_counter - last_sendto_counter
                kbps_sendto = (chunks_sendto * splitter.CHUNK_SIZE * 8) / 1000
                chunks_recvfrom = splitter.recvfrom_counter - last_recvfrom_counter
                kbps_recvfrom = (chunks_recvfrom * splitter.CHUNK_SIZE *
                                 8) / 1000
                last_sendto_counter = splitter.sendto_counter
                last_recvfrom_counter = splitter.recvfrom_counter
                sys.stdout.write(Color.none)
                _print_("|" + repr(kbps_recvfrom).rjust(10) + " |" +
                        repr(kbps_sendto).rjust(10),
                        end=" | ")
                #print('%5d' % splitter.chunk_number, end=' ')
                sys.stdout.write(Color.cyan)
                print(len(splitter.peer_list), end=' ')
                if not __debug__:
                    counter = 0
                for p in splitter.peer_list:
                    if not __debug__:
                        if counter > 10:
                            break
                        counter += 1
                    sys.stdout.write(Color.blue)
                    print(p, end=' ')
                    sys.stdout.write(Color.red)
                    print(str('%3d' % splitter.losses[p]) + '/' +
                          str('%3d' % chunks_sendto),
                          splitter.MAX_CHUNK_LOSS,
                          end=' ')
                    if splitter is Splitter_ACS:
                        try:
                            sys.stdout.write(Color.yellow)
                            print('%3d' % splitter.period[p], end=' ')
                            sys.stdout.write(Color.purple)
                            print(repr(
                                (splitter.number_of_sent_chunks_per_peer[p] *
                                 splitter.CHUNK_SIZE * 8) / 1000).rjust(10),
                                  end=' ')
                            splitter.number_of_sent_chunks_per_peer[p] = 0
                        except KeyError as e:
                            print("!", e, "--")
                            print(splitter.period[p])
                            pass
                    sys.stdout.write(Color.none)
                    print('', end=' ')
                print()

            except KeyboardInterrupt:
                print('Keyboard interrupt detected ... Exiting!')

                # Say to daemon threads that the work has been finished,
                splitter.alive = False

                # Wake up the "moderate_the_team" daemon, which is
                # waiting in a recvfrom().
                if not args.IMS:
                    splitter.say_goodbye(("127.0.0.1", splitter.PORT),
                                         splitter.team_socket)

                # Wake up the "handle_arrivals" daemon, which is waiting
                # in a accept().
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect(("127.0.0.1", splitter.PORT))
                sock.recv(struct.calcsize("4sH"))  # Multicast channel
                sock.recv(struct.calcsize("H"))  # Header size
                sock.recv(struct.calcsize("H"))  # Chunk size
                sock.recv(splitter.CHUNK_SIZE * splitter.HEADER_SIZE)  # Header
                sock.recv(struct.calcsize("H"))  # Buffer size
                if args.IMS:
                    number_of_peers = 0
                else:
                    number_of_peers = socket.ntohs(
                        struct.unpack("H", sock.recv(struct.calcsize("H")))[0])
                    print("Number of peers =", number_of_peers)
                # Receive the list
                while number_of_peers > 0:
                    sock.recv(struct.calcsize("4sH"))
                    number_of_peers -= 1

                # Breaks this thread and returns to the parent process
                # (usually, the shell).
                break
Example #33
def main():
    # this script saves two json files inside the downloaded tweets directory:
    # one json file has all the active users, the other has all inactive users from the topology
    # user activity is based on status count and availability of tweets (public vs private)

    # script can be stopped and started in the middle of running it without losing progress
    parser = argparse.ArgumentParser(
        description=
        'Get tweets of all twitter user ids in the provided topology file')
    parser.add_argument('-f',
                        '--users_file',
                        required=True,
                        action='store',
                        dest='users_file',
                        help='Location of file with user ids')
    parser.add_argument(
        '-c',
        '--dev_creds',
        required=True,
        action='store',
        dest='dev_creds',
        help='Location of file containing Twitter developer access credentials'
    )
    parser.add_argument(
        '-o',
        '--output_dir',
        required=True,
        action='store',
        dest='output_dir',
        help='Name of the directory you want to download Tweets to')
    parser.add_argument(
        '-n',
        '--num_tweets',
        action='store',
        dest='num_tweets',
        nargs='?',
        type=int,
        const=1,
        default=3200,
        help='Number of tweets to download from user (default is 3200)')
    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    tweets_dir = args.output_dir

    # create directory for storing tweets
    if not os.path.exists(os.path.dirname(tweets_dir)):
        os.makedirs(os.path.dirname(tweets_dir), 0o755)

    inactive_users = read_json(os.path.join(tweets_dir, 'inactive_users.json'))
    active_users = read_json(os.path.join(tweets_dir, 'active_users.json'))
    twpy_api = auth.get_access_creds(args.dev_creds)

    if not twpy_api:
        print('Error: Twitter developer access credentials denied')
        return

    # Load the list of user ids; the file should already contain a non-repeating set
    comm_set = set(read_json(args.users_file))
    print(comm_set)

    # download tweets for every single user in the set
    # separate active users from inactive users based on status count and availability
    bar = pyprind.ProgPercent(len(comm_set),
                              track_time=True,
                              title='Downloading Tweets')
    while comm_set:
        user = comm_set.pop()
        bar.update(item_id=str(user) + '\t')

        if str(user) in inactive_users or str(user) in active_users:
            continue

        # skip user if they don't exist or are inactive
        status_count = user_status_count(user, twpy_api)
        if status_count <= 10:
            inactive_users[str(user)] = status_count
            write_json(tweets_dir, active_users, inactive_users)
            continue

        # skip user if already downloaded their tweets
        if os.path.exists(os.path.join(tweets_dir, str(user))):
            active_users[str(user)] = status_count
            write_json(tweets_dir, active_users, inactive_users)
            continue

        tweets = get_tweets(user, twpy_api, args.num_tweets)

        if tweets:
            tweet_filename = os.path.join(tweets_dir, str(user))
            write_tweets(tweets, tweet_filename)
            active_users[str(user)] = status_count
        else:
            inactive_users[str(user)] = 0

        write_json(tweets_dir, active_users, inactive_users)
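
A minimal sketch of the read_json/write_json helpers assumed above (they are not part of this snippet, so the real project code may differ); it only illustrates how progress is persisted so the script can be stopped and resumed:

import json
import os


def read_json(path):
    # Return the stored data, or an empty dict when the file does not exist yet,
    # so a fresh run starts with empty active/inactive user maps.
    if not os.path.exists(path):
        return {}
    with open(path) as infile:
        return json.load(infile)


def write_json(tweets_dir, active_users, inactive_users):
    # Persist progress after every processed user so an interrupted run loses nothing.
    with open(os.path.join(tweets_dir, 'active_users.json'), 'w') as outfile:
        json.dump(active_users, outfile)
    with open(os.path.join(tweets_dir, 'inactive_users.json'), 'w') as outfile:
        json.dump(inactive_users, outfile)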
Example #34
0
def main_tool(root,
              argv=None,
              description=__description__,
              version=__version__,
              copyright=__copyright__,
              author=__author__,
              origin=None):
    global tool_origin
    tool_origin = origin

    if argv is None:
        argv = sys.argv

    parser = argparse.ArgumentParser(add_help=True,
                                     argument_default=argparse.SUPPRESS,
                                     description=description.format(
                                         version=version,
                                         copyright=copyright,
                                         author=author))
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Run the action in clean mode',
                        required=False)
    parser.add_argument('--dryrun',
                        action='store_true',
                        default=False,
                        help='Run the action in dryrun mode',
                        required=False)
    parser.add_argument(
        '--force',
        action='store_true',
        default=False,
        help='Force the action to start in any case',
        required=False)
    parser.add_argument('--interactive',
                        action='store_true',
                        default=False,
                        help='Run the CLI in interactive mode',
                        required=False)
    parser.add_argument('--gui',
                        action='store_true',
                        default=False,
                        help='Run the tool with a nice and simple UI',
                        required=False)

    # Create a temporary directory for the whole run
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            # Add first parser in the nested tree
            subparser = parser.add_subparsers(dest='tool',
                                              help='Available tools')
            subparser.required = True
            init_tools(root, subparser, tmpdir)
            argcomplete.autocomplete(parser)

            if '--interactive' in argv:
                log_info(
                    "Welcome to the interactive console. Type 'q', 'quit' or 'exit' to exit the console."
                )
                while True:

                    command = input_str('$').lower()
                    if command == 'q' or command == 'quit' or command == 'exit':
                        break

                    try:
                        args = parser.parse_args(command.split())
                        execute_tool(args.tool, args, tmpdir)
                    except SystemExit:
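                        # argparse exits via SystemExit on bad input (and for --help);
                        # swallow it so the interactive loop keeps running.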
                        continue

                return EXIT_CODE_SUCCESS
            elif '--gui' in argv:
                log_error("Not yet pal implemented")
            else:
                args = parser.parse_args(argv)
                return execute_tool(args.tool, args, tmpdir)
        finally:
            for int_dir in list_dirs(tmpdir):
                purge_dir(int_dir)
Example #35
0
                    '--attr',
                    action='store',
                    help='set value as attribute value')
parser.add_argument('-x',
                    '--xpath',
                    action='store',
                    help='xpath to apply to the file')
parser.add_argument('-n',
                    '--ns',
                    action='store',
                    help='namespace mappings added to the XPath context',
                    nargs='*',
                    metavar='prefix=\"url\"')
parser.add_argument('file', action='store', help='file to parse', nargs='*')

argcomplete.autocomplete(parser)
args = parser.parse_args()

if args.verbose:
    prettyPrint(('args', vars(args)), colour=True, output=sys.stderr)


def main():

    pn = re.compile('^([^=]*)=["\']([^\'"]*)["\']$')
    ns = {}
    if args.ns:
        for nsp in args.ns:
            m = pn.match(nsp)
            if m:
                ns[m.group(1)] = m.group(2)
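
A small illustration of how the regular expression above turns -n arguments of the form prefix="url" into the namespace dictionary (the prefixes and URLs here are examples only, presumably fed to the later XPath evaluation):

import re

pn = re.compile('^([^=]*)=["\']([^\'"]*)["\']$')
ns = {}
for nsp in ['xs="http://www.w3.org/2001/XMLSchema"', 'h="http://www.w3.org/1999/xhtml"']:
    m = pn.match(nsp)
    if m:
        ns[m.group(1)] = m.group(2)
# ns == {'xs': 'http://www.w3.org/2001/XMLSchema', 'h': 'http://www.w3.org/1999/xhtml'}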
Example #36
0
    def parse_options(self):
        """ return argument as a dictionary"""

        argcomplete.autocomplete(self.parser)
        args = self.parser.parse_args()
        return args
Example #37
0
File: quantize.py Project: smvg/intercom
        return packed_chunk

    def unpack(self, packed_chunk, dtype=minimal.Minimal.SAMPLE_TYPE):
        chunk_number, quantized_chunk = buffer.Buffering.unpack(
            self, packed_chunk, dtype)
        chunk = self.dequantize(quantized_chunk)
        return chunk_number, chunk

    def quantize(self, chunk):
        quantized_chunk = (chunk / self.quantization_step).astype(int)  # np.int was removed in NumPy 1.24
        return quantized_chunk

    def dequantize(self, quantized_chunk):
        chunk = self.quantization_step * quantized_chunk
        return chunk


if __name__ == "__main__":
    try:
        argcomplete.autocomplete(minimal.parser)
    except Exception:
        if __debug__:
            print("argcomplete not working :-/")
        else:
            pass
    minimal.args = minimal.parser.parse_known_args()[0]
    intercom = BR_Control()
    try:
        intercom.run()
    except KeyboardInterrupt:
        minimal.parser.exit("\nInterrupted by user")
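
A minimal round-trip sketch of the quantize/dequantize pair above (the quantization step of 64 and the sample values are assumptions for illustration only):

import numpy as np

quantization_step = 64
chunk = np.array([-1000, -65, 0, 63, 1000], dtype=np.int16)

quantized = (chunk / quantization_step).astype(int)  # same rule as quantize()
recovered = quantization_step * quantized            # same rule as dequantize()

# The reconstruction error is bounded by the quantization step; that bound is the
# information lost in exchange for sending smaller (more compressible) integers.
assert (np.abs(chunk - recovered) < quantization_step).all()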
Example #38
0
def main():
    if 'PYCHARM_HOSTED' in os.environ:
        convert = False  # in PyCharm, we should disable convert
        strip = False
    else:
        convert = None
        strip = None

    colorama.init(convert=convert, strip=strip)
    global args
    global newFile
    parser = _parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()  # parse with the same parser that argcomplete was registered on
    global numFile
    numFile = 0
    print """                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                    .:ss/-`                                                                                                                                                                                                                               
                `.+sddddhsso:`                                                                                                                                                                                                                            
              /shddddddddhssss++.                                                                                                                                                                                                                         
          -/yhddddddddddddhyssssso/:``                                                                                                                                                                                                                    
      `-+sddddddddddddddddddysssssssso:.                                                                                                                                                                                                                  
  ``/yhddddddddddddddddddddddysssssssssso+-.                                                                                                                                                                                                              
-+shddddddddddddddddddddddddddysssssssssssss/:.`                                            ````                                                                       ````                                                                               
+oooooooooooooo+:--------------osssssssssssssss/                                           `+++/                                                                       /+++.                                                                              
//////////////:                `ossssssssssssyh/                                           `+++/                                                                       /+++.                                                                              
/////////////:                  `/ssssssssssyhh/                                           `+++/                                                                       /+++.                                                                              
////////////.                    `/sssssssyyhhh/             -///////////////-.            `+++/            `--///////////-.               .-///////////:-`            /+++.          ////            -//-       `:///:.          :///:-                  
///////////.                      `/sssssyyhhhh/             -+++++++++++++++++/`          `+++/           ./+++++++++++++++/`           `:+++++++++++++++/-           /+++.          ++++            :++:        `:++++:`      ./+++/.                   
//////////.                         .sssyhhhhhh/             ```````````````-+++-          `+++/          `/++/-`````````-+++/`          .+++/`````````./++/`          /+++.          ++++            :++:          `/+++/.   `-++++-`                    
/////////`                           -oyhhhhhhh/                ``.----------++++`         `+++/          -+++:           /+++`         `/+++`          -+++-          /+++.          ++++            :++:           `-/+++:``/+++/-                      
///////+/.                           -shhhhhhhh/              `:+++++++++++++++++`         `+++/          -+++:           /+++`         `++++           -+++-          /+++.          ++++            :++:             ./+++//+++-`                       
/////+++++.                         .shhhhhhhhh/             `/+++:----------++++`         `+++/          -+++:           /+++`         `++++           -+++-          /+++.          ++++            :++:              `-+++++/.                         
////+++++++.                       /yhhhhhhhhhh/             -+++:           ++++`         `+++/          -+++:           /+++`         `++++           -+++-          /+++.          ++++            :++:              `-++++++-`                        
///+++++++++-                     /yhhhhhhhhhhh/             -+++-           ++++`         `+++/          -+++:           /+++`         `++++           -+++-          /+++.          ++++            :++:             ./+++//+++:`                       
//+++++++++++:                  `oyhhhhhhhhhhhh/             -+++:           ++++`         `+++/          -+++:           /+++`         `++++           -+++-          /+++.          ++++           ./++:           `:++++: ./+++/-                      
/+++++++++++++:                `shhhhhhhhhhhhhh/             ./++/-.........:+++/`         `+++/          `:++/-..........++++`          -+++:.........-/+++.          /+++.          -+++:.........-/+++:          -++++:.    ./+++:`                    
+++++++++++++++:...............+yyyyyyyyyyyyyyy/              ./+++++++++++++++:`          `+++/           ./+++++++++++++++++`          `:+++++++++++++++/.           /+++.          `:+++++++++++++++/-`        .:++++-       `:+++/:                   
-//+++++++++++++/:::::::::::::::::::::::::::--.`                .-///////////.`            `///:             .-///////////++++`            `.///////////-.             :///`            `.:///////////-`         `:///:`          .:///-                  
  ``-:+++++++++++/::::::::::::::::::::::--.`                                                                           ``.++++`                                                                                                                           
      `-//++++++++/:::::::::::::::::--..`                                                                 .:::::::::::://++++/`                                                                                                                           
          -:/+++++++:::::::::::::--.`                                                                     .+++++++++++++++/:-`                                                                                                                            
            ``-:+++++:::::::::--`                                                                          ````````````````                                                                                                                               
                `.//++/:::--.`                                                                                                                                                                                                                            
                    `-/:-.`                                                                                                                                                                                                                               
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                          
                                                                                                                                                                                                                                                        
                                            """

    print(args)


    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    event_handler = NewAnalyse()
    observer = Observer()
    observer.schedule(event_handler, args.folder, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #39
0
 def args(self):
     # parse the command line arguments
     parser = self._parser()
     argcomplete.autocomplete(parser)
     self._args = parser.parse_args()
Example #40
0
def main():
    parser = create_parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    save_samples(args.number_of_examples, args.directory, args.set)
Example #41
0
 def enable_autocomplete(self):
     argcomplete.autocomplete = AzCompletionFinder()
     argcomplete.autocomplete(
         self,
         validator=lambda c, p: c.lower().startswith(p.lower()),
         default_completer=lambda _: ())
Example #42
0
def main() -> None:
    import sys
    import argparse
    import argcomplete
    main_parser = argparse.ArgumentParser()
    shared_parser = argparse.ArgumentParser(add_help=False)
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    pkg_parser = sub_parsers.add_parser(
        "pkg",
        description="show infos of the package",
        parents=[shared_parser])
    pkg_parser.add_argument("--info",
                            help="show the package description",
                            action="store_true")
    pkg_parser.add_argument("--version",
                            help="print the package version",
                            action="store_true")
    pkg_parser.add_argument("--pyversion",
                            help="print the Python version",
                            action="store_true")
    pkg_parser.add_argument("--license",
                            help="print the package license",
                            action="store_true")
    pkg_parser.add_argument("--location",
                            help="print the package path",
                            action="store_true")
    pkg_parser.add_argument("--logfile",
                            help="print the logfile path",
                            action="store_true")
    pkg_parser.add_argument("--open_log",
                            help="open the package logfile",
                            action="store_true")
    pkg_parser.add_argument("--clear_log",
                            help="clear package logfile",
                            action="store_true")
    cat_parser = sub_parsers.add_parser(
        "cat_log",
        description="pipe stdin to global evo logfile"
        " or print logfile to stdout (if no stdin)",
        parents=[shared_parser])
    cat_parser.add_argument("-l",
                            "--loglevel",
                            help="loglevel of the message",
                            default="info",
                            choices=["error", "warning", "info", "debug"])
    cat_parser.add_argument("-m",
                            "--message",
                            help="explicit message instead of pipe")
    cat_parser.add_argument("-s",
                            "--source",
                            help="source name to use for the log message")
    cat_parser.add_argument("--clear_log",
                            help="clear logfile before exiting",
                            action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv[1:]) == 0:
        sys.argv.extend(["pkg", "--info"])  # cheap trick because YOLO
    args = main_parser.parse_args()
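    # Only append a trailing newline when stdout is a terminal, so piped/captured output stays clean.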
    line_end = "\n" if sys.stdout.isatty() else ""

    if args.subcommand == "pkg":
        if not len(sys.argv) > 2:
            pkg_parser.print_help()
            sys.exit(1)
        if args.license:
            print(open(os.path.join(PACKAGE_BASE_PATH, "LICENSE")).read())
        if args.info:
            main_parser.print_usage()
            print(DESC)
        if args.version:
            print(__version__, end=line_end)
        if args.pyversion:
            import platform as pf
            print(pf.python_version(), end=line_end)
        if args.location:
            print(PACKAGE_BASE_PATH, end=line_end)
        if args.logfile or args.open_log:
            print(settings.GLOBAL_LOGFILE_PATH, end=line_end)
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled",
                    end=line_end)
                sys.exit(1)
            if args.open_log:
                import webbrowser
                webbrowser.open(settings.GLOBAL_LOGFILE_PATH)
        if args.clear_log:
            from evo.tools import user
            if user.confirm("clear logfile? (y/n)"):
                open(settings.GLOBAL_LOGFILE_PATH, mode='w')

    elif args.subcommand == "cat_log":
        if os.name == "nt":
            print("cat_log feature not available on Windows")
            sys.exit(1)
        if not args.message and sys.stdin.isatty():
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled",
                    end=line_end)
            else:
                print(open(settings.GLOBAL_LOGFILE_PATH).read(), end="")
        elif not settings.SETTINGS.global_logfile_enabled:
            print("logfile disabled", end=line_end)
            sys.exit(1)
        else:
            import logging
            logger = logging.getLogger(__name__)
            from evo.tools import log
            file_fmt = log.DEFAULT_LONG_FMT
            if args.source:
                file_fmt = file_fmt.replace(
                    "%(module)s.%(funcName)s():%(lineno)s", args.source)
            log.configure_logging(silent=True, file_fmt=file_fmt)
            if not args.message:
                msg = sys.stdin.read()
            else:
                msg = args.message
            getattr(logger, args.loglevel)(msg)
        if args.clear_log:
            open(settings.GLOBAL_LOGFILE_PATH, mode='w')
Example #43
0
    def __init__(self):
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=textwrap.dedent('''\
                     Python Android Builder
                     --------------------------------------------------
                     Build the whole android world if no argument given
                     --------------------------------------------------
                     '''))

        parser.add_argument("-j", "--jobs", help="running jobs", type=int)
        parser.add_argument("-a",
                            "--droid",
                            help="build android",
                            action="store_true")
        parser.add_argument("-p",
                            "--package",
                            help="build vendor package",
                            action="store_true")
        parser.add_argument("-k",
                            "--kernel",
                            help="build kernel",
                            action="store_true")
        parser.add_argument("--kmodule",
                            help="build kernel modules only",
                            action="store_true")
        parser.add_argument("-c",
                            "--menuconfig",
                            help="kernel config",
                            action="store_true")
        parser.add_argument("-u",
                            "--uboot",
                            help="build uboot",
                            action="store_true")
        parser.add_argument("-s",
                            "--system",
                            help="pack system image",
                            action="store_true")
        parser.add_argument("-O",
                            "--buildota",
                            help="build Android OTA package",
                            action="store_true")
        parser.add_argument("-t",
                            "--target_product",
                            help="target_product for android",
                            type=str)
        parser.add_argument("--diffota",
                            help="build android diff OTA package",
                            action="store_true")
        parser.add_argument("--source", help="OTA source package", type=str)
        parser.add_argument("--target", help="OTA target package", type=str)
        parser.add_argument("--module",
                            help="Alternative for make submodule",
                            type=str)
        parser.add_argument("-C",
                            "--clean",
                            help="Clean build images",
                            type=str,
                            choices=["android", "kernel", "uboot"])
        parser.add_argument("-v",
                            "--build_varient",
                            help="userdebug or user",
                            type=str,
                            choices=["userdebug", "user"])
        parser.add_argument("--pack",
                            help="pack images",
                            type=str,
                            choices=["boot"])

        # auto-complete the arguments with TAB
        argcomplete.autocomplete(parser)
        # To enable completion, either:
        # 1. put this line in ~/.bashrc:
        #      eval "$(register-python-argcomplete pab)"
        # 2. or install the global completion hook:
        #      activate-global-python-argcomplete [--user]

        # parse the args
        args = parser.parse_args()

        # run this tool on android top dir
        self.__buildconfig = get_config_file("pabrc")
        try:
            assert os.path.exists(self.__buildconfig)
        except AssertionError:
            print('Error: Not on android source tree')
            os.sys.exit()

        prj_info = parse_kv_file(self.__buildconfig)

        self.__build_droid = True if args.droid else False
        self.__build_vendor_package = True if args.package else False
        self.__build_kernel = True if args.kernel else False
        self.__build_kmodule_only = True if args.kmodule else False
        self.__build_uboot = True if args.uboot else False
        self.__diff_ota = True if args.diffota else False

        self.__source = args.source if args.source else False
        self.__target = args.target if args.target else False

        # for submodule
        self.__module = args.module if args.module else False

        # clean what build?
        self.__clean_kernel = True if args.clean == "kernel" else False
        self.__clean_android = True if args.clean == "android" else False
        self.__clean_uboot = True if args.clean == "uboot" else False
        # pack x images
        self.__pack_boot = True if args.pack == "boot" else False

        self.__kernel_config = True if args.menuconfig else False
        self.__pack_system = True if args.system else False
        self.__build_ota = True if args.buildota else False

        self.__jobs_nr = args.jobs if args.jobs else 16

        # TARGET_PRODUCT = AAA_BBB
        # PRODUCT_DEVICE = AAA-BBB
        self.__target_product = args.target_product if args.target_product else prj_info[
            "TARGET_PRODUCT"]
        target_product_aaa = self.__target_product.split('_')[0]
        target_product_bbb = self.__target_product.split('_')[1]
        self.__product_device = target_product_aaa + '-' + target_product_bbb
        self.__build_varient = args.build_varient if args.build_varient else prj_info[
            "TARGET_BUILD_VARIANT"]
        self.argsd = {
            "build_kernel": self.__build_kernel,
            "build_kmodule": self.__build_kmodule_only,
            "build_uboot": self.__build_uboot,
            "build_droid": self.__build_droid,
            "build_vendor": self.__build_vendor_package,
            "clean_kernel": self.__clean_kernel,
            "clean_android": self.__clean_android,
            "clean_uboot": self.__clean_uboot,
            "pack_boot": self.__pack_boot,
            "kernel_config": self.__kernel_config,
            "pack_system": self.__pack_system,
            "build_ota": self.__build_ota,
            "diff_ota": self.__diff_ota,
            "source_package": self.__source,
            "target_package": self.__target,
            "submodule": self.__module,
            "target_product": self.__target_product,  # PRODUCT_NAME
            "product_device": self.__product_device,  # PRODUCT_DEVICE
            "build_varient": self.__build_varient,
            "jobs_nr": self.__jobs_nr
        }
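
The get_config_file and parse_kv_file helpers are not shown in this snippet; a hypothetical sketch, assuming the pabrc file holds plain "KEY = VALUE" lines such as TARGET_PRODUCT and TARGET_BUILD_VARIANT:

import os


def get_config_file(name):
    # Hypothetical: the rc file is expected in the current (android top) directory.
    return os.path.join(os.getcwd(), name)


def parse_kv_file(path):
    # Hypothetical: parse "KEY = VALUE" lines (e.g. TARGET_PRODUCT = AAA_BBB) into a dict,
    # skipping blank lines and comments.
    info = {}
    with open(path) as rc:
        for line in rc:
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = line.split('=', 1)
            info[key.strip()] = value.strip()
    return info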
Example #44
0
    def __init__(self):
        #############################################################################
        # Create parsers and assign arguments
        #############################################################################

        # arg parsing
        self.parser = ArgumentParser(
            formatter_class=RawDescriptionHelpFormatter,
            add_help=True,
            description=
            """Cisco APi Tool: a nettool built on Cisco Prime's API""")
        self.parser.add_argument('-d',
                                 '--debug',
                                 action='store_true',
                                 required=False,
                                 help="debug output")
        self.parser.add_argument('-e',
                                 '--email',
                                 required=False,
                                 help="email to log to")
        self.subparsers = self.parser.add_subparsers(dest="sub_cmd")

        # ----- Create base sub-commands
        find_sp = self.subparsers.add_parser(
            'find',
            help="get client device information").add_subparsers(dest="find")
        mock_sp = self.mock_subparser(self.subparsers)
        change_sp = self.change_subparser(self.subparsers)
        poke_sp = self.poke_subparser(self.subparsers)
        push_sp = self.push_subparser(self.subparsers)
        tools_sp = self.tools_subparser(self.subparsers)
        reports_sp = self.reports_subparser(self.subparsers)
        # ----- Completed create base sub-commands

        # ----- capt find sub-commands -----
        # ----- capt find ip x.x.x.x
        find_ip = find_sp.add_parser('ip',
                                     help="IPv4 address of client device")
        self.addr_arg(find_ip)
        # defaults are used to determine which function should be called, allows interactive style prompt with
        #find_ip.set_defaults(func=CliParser.find_ip)
        # ----- capt find ip x.x.x.x --ap
        self.ap_arg(find_ip)
        # ----- capt find ip x.x.x.x --phone
        self.phone_arg(find_ip)
        # ----- capt find mac xx:xx:xx:xx:xx:xx
        find_mac = self.mac_parser(find_sp)
        self.addr_arg(find_mac)
        #find_mac.set_defaults(func=CliParser.find_mac)
        # ----- capt find mac xx:xx:xx:xx:xx:xx --ap
        self.ap_arg(find_mac)
        # ----- capt find mac xx:xx:xx:xx:xx:xx --phone
        self.phone_arg(find_mac)
        # ----- capt find desc xxxxxx
        find_desc = self.desc_parser(find_sp)
        self.desc_arg(find_desc)
        self.device_name_arg(find_desc)
        #find_desc.set_defaults(func=CliParser.find_desc)
        # ----- capt find desc xxxxxx --active
        self.active_arg(find_desc)
        # ----- capt find core -vlan
        find_core = self.core_parser(find_sp)
        self.addr_arg(find_core)  # adds address field
        self.core_search_arg(find_core)
        #  find_core.set_defaults(func=CliParser.find_core)
        # ----- Completed capt find sub-commands -----

        # ----- capt poke sub-commands -----
        # ----- capt poke port XXX.XXX.XXX.XXX Y/Y/Y
        poke_port = self.port_parser(poke_sp)
        self.addr_arg(poke_port)  # adds address field
        self.int_arg(poke_port)  # adds interface field
        #self.sync_arg(poke_port) <TODO implement sync functionality>
        # poke_port.set_defaults(func=CliParser.poke_port)

        # ----- capt upgrade sub-commands (deprecated?) -----
        # ----- capt upgrade x.x.x.x
        upgrade = self.upgrade_parser(self.subparsers)
        self.addr_arg(upgrade)
        # upgrade.set_defaults(func=CliParser.upgrade)
        # ----- capt mock upgrade x.x.x.x
        mock_upgrade = self.upgrade_parser(mock_sp)
        self.addr_arg(mock_upgrade)
        # mock_upgrade.set_defaults(func=CliParser.mock_upgrade)

        # ----- capt change sub-commands -----
        # ----- capt change mac xx:xx:xx:xx:xx:xx --vlan yyyy
        change_mac = self.mac_parser(change_sp)
        self.addr_arg(change_mac)
        self.vlan_arg(change_mac)
        #   change_mac.set_defaults(func=CliParser.change_mac)

        # ----- capt push sub-commands -----
        # ----- capt push bas -a W.W.W.W -p X/X/X -v YYYY -d "ZZZZZZ"
        push_bas = self.bas_parser(push_sp)
        self.addr_arg(push_bas)
        self.int_arg(push_bas)
        self.vlan_arg(push_bas)
        self.desc_flag_arg(push_bas)
        #  push_bas.set_defaults(func=CliParser.push_bas)

        # ----- capt push template -a W.W.W.W -p X/X/X -v YYYY -d "ZZZZZZ"
        push_template = self.template_parser(push_sp)
        self.template_name_arg(push_template)
        self.file_arg(push_template)

        # ----- capt tools sub-commands -----
        # ----- capt tools apcheck
        tools_ap = self.apcheck_subparser(tools_sp)

        # ----- capt tools apcheck slowports
        ap_slow_ports = self.slow_ports_parser(tools_ap)
        self.toggle_arg(ap_slow_ports)
        self.batch_arg(ap_slow_ports)
        # ----- capt tools apcheck unack
        ap_unack = self.unack_parser(tools_ap)

        # ----- capt tools apcheck alarms
        ap_alarms = self.alarms_parser(tools_ap)
        self.days_arg(ap_alarms)
        self.toggle_arg(ap_alarms)
        self.batch_arg(ap_alarms)
        #   ap_alarms.set_defaults(func=CliParser.ap_alarms)

        # ----- capt tools apcheck slowports

        # ----- capt test sub-commands -----
        # ----- capt test_api
        test_api_sp = self.test_api_subparser(self.subparsers)
        test_api_mac = self.mac_parser(test_api_sp)
        self.addr_arg(test_api_mac)
        #  test_api_mac.set_defaults(func=CliParser.test_api_mac)

        argcomplete.autocomplete(self.parser)
        ################NEW DONE

        # -- reports_portcount ----
        port_count = self.port_count_parser(reports_sp)
        self.building_filter_arg(port_count)
        self.verbose_arg(port_count)
        self.csv_arg(port_count)
        self.sync_arg(port_count)

        # -- reports devcount ---
        dev_count = self.dev_count_parser(reports_sp)
        self.building_filter_arg(dev_count)
        self.verbose_arg(dev_count)
        self.csv_arg(dev_count)
        self.sync_arg(dev_count)

        # -- reports vlan_mapper ---
        vlan_map = self.vlan_map_parser(reports_sp)
        self.building_filter_arg(vlan_map)
        self.verbose_arg(vlan_map)
        self.csv_arg(vlan_map)
        self.sync_arg(vlan_map)

        # -- reports service_matrix ---
        service_matrix = self.service_matrix_parser(reports_sp)
        self.building_filter_arg(service_matrix)
        self.verbose_arg(service_matrix)
        self.csv_arg(service_matrix)
        self.sync_arg(service_matrix)
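
The commented-out set_defaults(func=...) lines above hint at the usual argparse dispatch pattern; a minimal, self-contained sketch (the find_ip handler and its wiring here are illustrative, not the project's actual API):

import argparse


def find_ip(args):
    # Illustrative handler: would look up the client device by IPv4 address.
    print("looking up", args.address)


parser = argparse.ArgumentParser(prog='capt')
subparsers = parser.add_subparsers(dest='sub_cmd')

ip_parser = subparsers.add_parser('ip', help="IPv4 address of client device")
ip_parser.add_argument('address')
ip_parser.set_defaults(func=find_ip)   # each subparser carries its own handler

args = parser.parse_args(['ip', '10.0.0.1'])
args.func(args)                        # dispatch without a chain of if/elif checks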
Example #45
0
def main() -> None:
    """Main program entry point."""
    parser = argparse.ArgumentParser()
    parser.add_argument('working_tree',
                        help='path to the working tree to migrate')

    if argcomplete:
        argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if sys.version_info < (3, 8):
        raise Exception(
            f'Python 3.8+ is required, but Python {".".join(str(i) for i in sys.version_info[:2])} is being used.'
        )

    input_directory = args.working_tree
    content_directory = os.path.join(os.path.dirname(__file__), 'content')
    output_directory = os.path.join(input_directory, '.azure-pipelines')
    output_filename = os.path.join(output_directory, 'azure-pipelines.yml')
    galaxy_filename = os.path.join(input_directory, 'galaxy.yml')

    yaml = ruamel.yaml.YAML()

    try:
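        # A readable galaxy.yml at the working-tree root means this is a collection checkout;
        # ansible-core itself has none.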
        with open(galaxy_filename) as input_file:
            galaxy = yaml.load(input_file)
            is_collection = True
    except FileNotFoundError:
        galaxy = None
        is_collection = False

    if galaxy:
        checkout_path = os.path.join('ansible_collections',
                                     galaxy['namespace'], galaxy['name'])
        main_branch = 'main'  # best guess, not always correct

        branches = [
            main_branch,
            'stable-*',
        ]
    else:
        checkout_path = 'ansible'
        main_branch = 'devel'

        branches = [
            main_branch,
            'stable-*',
        ]

    parsed_matrix = parse_shippable_matrix(
        os.path.join(input_directory, 'shippable.yml'))
    classified_matrix = [
        classify_matrix_item(input_directory, is_collection, item)
        for item in parsed_matrix
    ]

    content_stages = generate_stages(classified_matrix)

    content = generate_pipelines_config(content_stages, branches,
                                        checkout_path, main_branch,
                                        is_collection)

    write_content(content, content_directory, input_directory,
                  output_directory, output_filename, is_collection)
Example #46
0
def main(args=sys.argv[1:], output=sys):
    api = SecurityHeaders()
    parser = argparse.ArgumentParser(description='Check HTTP security headers', \
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        'url',
        nargs='*',
        metavar='URL',
        default='',
        type=str,
        help='Target URL, path to file with a list of target URLs.')
    group.add_argument('--listcheckers',
                       action='store_true',
                       dest='listcheckers',
                       help='Show a list of built-in checkers.')
    group.add_argument('--listformatters',
                       action='store_true',
                       dest='listformatters',
                       help='Show a list of built-in finding formatters.')
    group.add_argument('--listheaders',
                       action='store_true',
                       dest='listheaders',
                       help='Show the headers that are analyzed.')
    group.add_argument('--headers',
                       metavar='HEADERS',
                       dest='headers',
                       type=str,
                       default='',
                       help='List of headers to analyze.')
    group.add_argument('--response',
                       dest='response',
                       type=argparse.FileType('r'),
                       help='Analyze headers saved in this response file.')

    parser.add_argument('--defaultscheme',
                        metavar='https',
                        dest='defaultscheme',
                        default='https',
                        type=str,
                        choices=['http', 'https'],
                        help='Default scheme if not part of url')
    parser.add_argument('--max-redirects',
                        metavar='2',
                        dest='redirects',
                        default=2,
                        type=int,
                        help='Max redirects, set 0 to disable')

    parser.add_argument(
        '--config',
        dest='config',
        metavar='./conf/app.conf',
        default=None,
        type=str,
        help=
        'Path to directory with optional configuration files the parser uses.')
    parser.add_argument(
        '--urlcolumn',
        metavar='0',
        dest='urlcolumn',
        default=0,
        type=int,
        help=
        'If a CSV file with URLs is provided as input, then this is the column containing the urls/domains'
    )
    parser.add_argument(
        '--startrow',
        metavar='0',
        dest='startrow',
        default=0,
        type=int,
        help=
        'If a CSV file with URLs is provided as input, then this is the line to start fetching urls at.'
    )

    parser.add_argument('--screen',
                        action='store_true',
                        dest='screen',
                        help='Print result to the screen')
    parser.add_argument(
        '--file',
        dest='temp',
        metavar="./tmp",
        default=None,
        type=str,
        help=
        'If the results are saved to a file, then they are put in this directory.'
    )
    parser.add_argument('--formatter',
                        metavar='Tabulate',
                        dest='formatter',
                        default='console',
                        choices=api.get_all_formatter_names(),
                        help='How do you want to format the findings.')
    parser.add_argument('--flatten',
                        action='store_true',
                        dest='flatten',
                        help='Merge multiple results into one table.')

    checkernames = api.get_all_checker_names()
    parser.add_argument('--skipcheckers',
                        dest='unwanted',
                        nargs='*',
                        metavar='checkername',
                        default=[],
                        type=str,
                        help='A list of checkers to skip.',
                        choices=checkernames)
    parser.add_argument('--checkers',
                        dest='checks',
                        nargs='*',
                        metavar='Checker',
                        default=['Checker'],
                        type=str,
                        help='A list of checkers to run.',
                        choices=checkernames + ['Checker'])

    argcomplete.autocomplete(parser)
    args = parser.parse_args(args)

    if (args.listcheckers):
        sys.stdout.write(api.get_all_checker_names_as_tree_string() + "\n")
        sys.exit(0)
    elif (args.listformatters):
        sys.stdout.write(', '.join(api.get_all_formatter_names()) + "\n")
        sys.exit(0)
    elif (args.listheaders):
        sys.stdout.write(', '.join(api.get_all_header_names()) + "\n")
        sys.exit(0)
    else:
        if not args.url and not (args.headers or args.response):
            parser.print_help()
            sys.exit(0)
    if not args.temp:
        args.screen = True

    try:
        api.load_options_from_file(args.config)
    except Exception as error:
        sys.stderr.write('\033[91m' + str(error) + '\033[0m' + "\n")

    for key, value in vars(args).items():
        api.set_option(key, value)

    processer = ResultProcesser(args, api)
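    # With --flatten the results are collected first and rendered as one merged table below;
    # otherwise each result is printed as it arrives via the callback.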
    if not args.flatten:
        callback = processer.callback
    else:
        callback = None

    if args.response:
        results = api.check_headers_from_file(args.response)
        callback(results)
    elif args.url:
        results = api.check_headers_parallel(create_urls(args),
                                             callback=callback)
    elif args.headers:
        results = api.check_headers_from_string(args.headers)
        callback(results)

    if args.flatten:
        results = [r.get() for r in results]
        results = [[item for sublist in results for item in sublist]]
    for result in results:
        if args.flatten:
            processer.process(result)

    sys.exit(0)
Example #47
0
def main():
    parser = create_parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    train(args)
Example #48
0
def parse_args():
    """Parse argv into usable input."""
    description = (
        "Make queries against the Red Hat Security Data API\n"
        "Original announcement: https://access.redhat.com/blogs/766093/posts/2387601\n"
        "Docs: https://access.redhat.com/documentation/en/red-hat-security-data-api/\n"
    )
    version = "{0} v{1} last mod {2}".format(prog, vers['version'],
                                             vers['date'])
    epilog = ("VERSION:\n"
              "  {0}\n"
              "  See <http://github.com/ryran/rhsecapi> to report bugs or RFEs"
              ).format(version)
    fmt = lambda prog: CustomFormatter(prog)
    p = argparse.ArgumentParser(prog=prog,
                                description=description,
                                add_help=False,
                                epilog=epilog,
                                formatter_class=fmt)
    # New group
    g_listByAttr = p.add_argument_group('FIND CVES BY ATTRIBUTE')
    g_listByAttr.add_argument(
        '--q-before',
        metavar="YYYY-MM-DD",
        help="Narrow down results to before a certain time period")
    g_listByAttr.add_argument(
        '--q-after',
        metavar="YYYY-MM-DD",
        help="Narrow down results to after a certain time period")
    g_listByAttr.add_argument(
        '--q-bug',
        metavar="BZID",
        help=
        "Narrow down results by Bugzilla ID (specify one or more, e.g.: '1326598,1084875')"
    )
    g_listByAttr.add_argument(
        '--q-advisory',
        metavar="RHSA",
        help=
        "Narrow down results by errata advisory (specify one or more, e.g.: 'RHSA-2016:0614,RHSA-2016:0610')"
    )
    g_listByAttr.add_argument(
        '--q-severity',
        metavar="IMPACT",
        choices=['low', 'moderate', 'important', 'critical'],
        help=
        "Narrow down results by severity rating (specify one of 'low', 'moderate', 'important', or 'critical')"
    )
    g_listByAttr.add_argument(
        '--q-product',
        metavar="PRODUCT",
        help=
        "Narrow down results by product name via case-insensitive regex (e.g.: 'linux 7' or 'openstack platform [89]'); the API checks this against the 'FIXED_RELEASES' field so will only match CVEs where PRODUCT matches the 'product_name' of some released errata"
    )
    g_listByAttr.add_argument(
        '--q-package',
        metavar="PKG",
        help=
        "Narrow down results by package name (e.g.: 'samba' or 'thunderbird')")
    g_listByAttr.add_argument(
        '--q-cwe',
        metavar="CWEID",
        help=
        "Narrow down results by CWE ID (specify one or more, e.g.: '295,300')")
    g_listByAttr.add_argument(
        '--q-cvss',
        metavar="SCORE",
        help="Narrow down results by CVSS base score (e.g.: '8.0')")
    g_listByAttr.add_argument(
        '--q-cvss3',
        metavar="SCORE",
        help="Narrow down results by CVSSv3 base score (e.g.: '5.1')")
    g_listByAttr.add_argument(
        '--q-empty',
        action='store_true',
        help=
        "Allow performing an empty search; when used with no other --q-xxx options, this will return the first 1000 of the most recent CVEs (subject to below PAGESZ & PAGENUM)"
    )
    g_listByAttr.add_argument(
        '--q-pagesize',
        metavar="PAGESZ",
        type=int,
        help=
        "Set a cap on the number of results that will be returned (default: 1000)"
    )
    g_listByAttr.add_argument(
        '--q-pagenum',
        metavar="PAGENUM",
        type=int,
        help=
        "Select what page number to return (default: 1); only relevant when there are more than PAGESZ results"
    )
    g_listByAttr.add_argument(
        '--q-raw',
        metavar="RAWQUERY",
        action='append',
        help=
        "Narrow down results by RAWQUERY (e.g.: '--q-raw a=x --q-raw b=y'); this allows passing arbitrary params (e.g. something new that is unknown to {0})"
        .format(prog))
    # New group
    g_listByIava = p.add_argument_group('RETRIEVE SPECIFIC IAVAS')
    g_listByIava.add_argument(
        '-i',
        '--iava',
        dest='iavas',
        metavar='YYYY-?-NNNN',
        action='append',
        help=
        "Retrieve notice details for an IAVA number; specify option multiple times to retrieve multiple IAVAs at once (use below --extract-cves option to lookup mapped CVEs)"
    )
    # New group
    g_getCve = p.add_argument_group('RETRIEVE SPECIFIC CVES')
    g_getCve.add_argument(
        'cves',
        metavar="CVE-YYYY-NNNN",
        nargs='*',
        help=
        "Retrieve a CVE or list of CVEs (e.g.: 'CVE-2016-5387'); note that case-insensitive regex-matching is done -- extra characters & duplicate CVEs will be discarded"
    )
    g_getCve.add_argument(
        '-x',
        '--extract-cves',
        action='store_true',
        help=
        "Extract CVEs from search query (as initiated by at least one of the --q-xxx options or the --iava option)"
    )
    g_getCve.add_argument(
        '-0',
        '--stdin',
        action='store_true',
        help=
        "Extract CVEs from stdin (CVEs will be matched by case-insensitive regex '{0}' and duplicates will be discarded); note that terminal width auto-detection is not possible in this mode and WIDTH defaults to '70' (but can be overridden with '--width')"
        .format(rhsda.cve_regex_string))
    # New group
    g_cveDisplay = p.add_argument_group('CVE DISPLAY OPTIONS')
    g_cveDisplay0 = g_cveDisplay.add_mutually_exclusive_group()
    g_cveDisplay0.add_argument(
        '-f',
        '--fields',
        metavar="FIELDS",
        default='BASE',
        help=
        "Customize field display via comma-separated case-insensitive list (default: {0}); see --all-fields option for full list of official API-provided fields; shorter field aliases: {1}; optionally prepend FIELDS with plus (+) sign to add fields to the default (e.g., '-f +iava,cvss3') or a caret (^) to remove fields from all-fields (e.g., '-f ^mitigation,severity')"
        .format(", ".join(rhsda.cveFields.base),
                ", ".join(rhsda.cveFields.aliases_printable)))
    g_cveDisplay0.add_argument(
        '-a',
        '--all-fields',
        dest='fields',
        action='store_const',
        const='ALL',
        help="Display all supported fields (currently: {0})".format(", ".join(
            rhsda.cveFields.all)))
    g_cveDisplay0.add_argument(
        '-m',
        '--most-fields',
        dest='fields',
        action='store_const',
        const='MOST',
        help=
        "Display all fields mentioned above except the heavy-text ones -- (excludes: {0})"
        .format(", ".join(rhsda.cveFields.not_most)))
    g_cveDisplay.add_argument(
        '-p',
        '--product',
        help=
        "Spotlight a particular PRODUCT via case-insensitive regex; this hides CVEs where 'FIXED_RELEASES' or 'FIX_STATES' don't have an item with 'cpe' (e.g. 'cpe:/o:redhat:enterprise_linux:7') or 'product_name' (e.g. 'Red Hat Enterprise Linux 7') matching PRODUCT; this also hides all items in 'FIXED_RELEASES' & 'FIX_STATES' that don't match PRODUCT"
    )
    g_cveDisplay.add_argument('-j',
                              '--json',
                              action='store_true',
                              help="Print full & raw JSON output")
    g_cveDisplay.add_argument('-u',
                              '--urls',
                              dest='printUrls',
                              action='store_true',
                              help="Print URLs for all relevant fields")
    # New group
    g_general = p.add_argument_group('GENERAL OPTIONS')
    g_general.add_argument(
        '-w',
        '--wrap',
        metavar="WIDTH",
        dest='wrapWidth',
        nargs='?',
        default=1,
        const=70,
        type=int,
        help=
        "Change wrap-width of long fields (acknowledgement, details, statement, mitigation, references) in non-json output (default: wrapping WIDTH equivalent to TERMWIDTH-2 unless using '--pastebin' where default WIDTH is '168'; specify '0' to disable wrapping; WIDTH defaults to '70' if option is used but WIDTH is omitted)"
    )
    g_general.add_argument('-c',
                           '--count',
                           action='store_true',
                           help="Exit after printing CVE counts")
    g_general.add_argument(
        '-l',
        '--loglevel',
        choices=['debug', 'info', 'notice', 'warning'],
        default='notice',
        help=
        "Configure logging level threshold; lower from the default of 'notice' to see extra details printed to stderr"
    )
    g_general.add_argument(
        '-t',
        '--threads',
        metavar="THREDS",
        type=int,
        default=rhsda.numThreadsDefault,
        help=
        "Set number of concurrent worker threads to allow when making CVE queries (default on this system: {0})"
        .format(rhsda.numThreadsDefault))
    g_general.add_argument(
        '-P',
        '--pastebin',
        action='store_true',
        help=
        "Send output to Fedora Project Pastebin (paste.fedoraproject.org) and print only URL to stdout"
    )
    g_general.add_argument(
        '-E',
        '--pexpire',
        metavar="DAYS",
        nargs='?',
        const=1,
        default=28,
        type=int,
        help=
        "Set time in days after which paste will be deleted (defaults to '28'; specify '0' to disable expiration; DAYS defaults to '1' if option is used but DAYS is omitted)"
    )
    g_general.add_argument(
        '--dryrun',
        action='store_true',
        help=
        "Skip CVE retrieval; this option only makes sense in concert with --stdin, for the purpose of quickly getting a printable list of CVE ids from stdin"
    )
    g_general.add_argument('-h',
                           dest='showUsage',
                           action='store_true',
                           help="Show short usage summary and exit")
    g_general.add_argument('--help',
                           dest='showHelp',
                           action='store_true',
                           help="Show this help message and exit")
    if haveArgcomplete:
        # Enable shell tab-completion when argcomplete is available
        argcomplete.autocomplete(p)
    o = p.parse_args()
    if o.showHelp:
        from tempfile import NamedTemporaryFile
        from subprocess import call
        tmp = NamedTemporaryFile(prefix='{0}-help-'.format(prog),
                                 suffix='.txt',
                                 mode='w')
        p.print_help(file=tmp)
        tmp.flush()
        call(['less', tmp.name])
        sys.exit()
    # Add search params to dict
    o.searchParams = {
        'before': o.q_before,
        'after': o.q_after,
        'bug': o.q_bug,
        'advisory': o.q_advisory,
        'severity': o.q_severity,
        'product': o.q_product,
        'package': o.q_package,
        'cwe': o.q_cwe,
        'cvss_score': o.q_cvss,
        'cvss3_score': o.q_cvss3,
        'per_page': o.q_pagesize,
        'page': o.q_pagenum,
    }
    if o.q_raw:
        for param in o.q_raw:
            key_val = param.split("=")
            o.searchParams[key_val[0]] = key_val[1]
    # Check for search params (--q-xxx) to determine if performing search
    if all(val is None for val in o.searchParams.values()) and not o.q_empty:
        o.doSearch = False
    else:
        o.doSearch = True
        if o.iavas:
            print(
                "{0}: error: --q-xxx options not allowed in concert with -i/--iava"
                .format(prog),
                file=sys.stderr)
            sys.exit(1)
        if o.cves or o.stdin:
            print(
                "{0}: error: --q-xxx options not allowed in concert with CVE args"
                .format(prog),
                file=sys.stderr)
            sys.exit(1)
    if o.cves:
        o.cves = rhsda.extract_cves_from_input(o.cves, "cmdline")
        if not o.cves:
            o.showUsage = True
    if o.stdin and not sys.stdin.isatty():
        found = rhsda.extract_cves_from_input(sys.stdin)
        o.cves.extend(found)
    # If no search (--q-xxx) and no CVEs mentioned
    if not o.showUsage and not (o.doSearch or o.cves or o.iavas):
        logger.error(
            "Must specify CVEs/IAVAs to retrieve or a search to perform (--q-xxx opts)"
        )
        o.showUsage = True
    if o.showUsage:
        p.print_usage()
        print("\nRun {0} --help for full help page\n\n{1}".format(
            prog, epilog))
        sys.exit()
    # If autowrap and using pastebin, set good width
    if o.wrapWidth == 1 and o.pastebin:
        o.wrapWidth = 168
    if o.json:
        o.outFormat = 'jsonpretty'
    else:
        o.outFormat = 'plaintext'
    logger.setLevel(o.loglevel.upper())
    return o
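The help-paging idiom at the end of the parser above (writing print_help() to a temp file and viewing it with less) can be reduced to a small standalone sketch. Everything here is hypothetical (program name, option wiring) and it assumes a POSIX system with less on the PATH:

import sys
from argparse import ArgumentParser
from subprocess import call
from tempfile import NamedTemporaryFile

# Built-in -h/--help is disabled so the flags can be intercepted manually
parser = ArgumentParser(prog='example', add_help=False)
parser.add_argument('-h', dest='showUsage', action='store_true',
                    help="Show short usage summary and exit")
parser.add_argument('--help', dest='showHelp', action='store_true',
                    help="Show this help message and exit")
opts = parser.parse_args()
if opts.showHelp:
    # Dump the full help text to a temporary file and page it through less
    with NamedTemporaryFile(prefix='example-help-', suffix='.txt', mode='w') as tmp:
        parser.print_help(file=tmp)
        tmp.flush()
        call(['less', tmp.name])
    sys.exit()
if opts.showUsage:
    parser.print_usage()
    sys.exit()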
Example #49
0
def parse_args():
    """Parse command line arguments."""
    try:
        import argparse
    except ImportError:
        if '--requirements' not in sys.argv:
            raise
        raw_command(generate_pip_install(find_pip(), 'ansible-test'))
        import argparse

    try:
        import argcomplete
    except ImportError:
        argcomplete = None

    if argcomplete:
        epilog = 'Tab completion available using the "argcomplete" python package.'
    else:
        epilog = 'Install the "argcomplete" python package to enable tab completion.'

    parser = argparse.ArgumentParser(epilog=epilog)

    common = argparse.ArgumentParser(add_help=False)

    common.add_argument('-e',
                        '--explain',
                        action='store_true',
                        help='explain commands that would be executed')

    common.add_argument('-v',
                        '--verbose',
                        dest='verbosity',
                        action='count',
                        default=0,
                        help='display more output')

    common.add_argument('--color',
                        metavar='COLOR',
                        nargs='?',
                        help='generate color output: %(choices)s',
                        choices=('yes', 'no', 'auto'),
                        const='yes',
                        default='auto')

    common.add_argument('--debug',
                        action='store_true',
                        help='run ansible commands in debug mode')

    common.add_argument(
        '--truncate',
        dest='truncate',
        metavar='COLUMNS',
        type=int,
        default=display.columns,
        help='truncate some long output (0=disabled) (default: auto)')

    common.add_argument('--redact',
                        dest='redact',
                        action='store_true',
                        help='redact sensitive values in output')

    test = argparse.ArgumentParser(add_help=False, parents=[common])

    test.add_argument(
        'include',
        metavar='TARGET',
        nargs='*',
        help='test the specified target').completer = complete_target

    test.add_argument(
        '--exclude',
        metavar='TARGET',
        action='append',
        help='exclude the specified target').completer = complete_target

    test.add_argument(
        '--require',
        metavar='TARGET',
        action='append',
        help='require the specified target').completer = complete_target

    test.add_argument('--coverage',
                      action='store_true',
                      help='analyze code coverage when running tests')

    test.add_argument('--coverage-label',
                      default='',
                      help='label to include in coverage output file names')

    test.add_argument('--metadata', help=argparse.SUPPRESS)

    add_changes(test, argparse)
    add_environments(test)

    integration = argparse.ArgumentParser(add_help=False, parents=[test])

    integration.add_argument('--python',
                             metavar='VERSION',
                             choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                             help='python version: %s' %
                             ', '.join(SUPPORTED_PYTHON_VERSIONS))

    integration.add_argument(
        '--start-at', metavar='TARGET',
        help='start at the specified target').completer = complete_target

    integration.add_argument('--start-at-task',
                             metavar='TASK',
                             help='start at the specified task')

    integration.add_argument(
        '--tags',
        metavar='TAGS',
        help='only run plays and tasks tagged with these values')

    integration.add_argument(
        '--skip-tags',
        metavar='TAGS',
        help='only run plays and tasks whose tags do not match these values')

    integration.add_argument('--diff',
                             action='store_true',
                             help='show diff output')

    integration.add_argument(
        '--allow-destructive',
        action='store_true',
        help='allow destructive tests (--local and --tox only)')

    integration.add_argument('--retry-on-error',
                             action='store_true',
                             help='retry failed test with increased verbosity')

    integration.add_argument('--continue-on-error',
                             action='store_true',
                             help='continue after failed test')

    integration.add_argument(
        '--debug-strategy',
        action='store_true',
        help='run test playbooks using the debug strategy')

    integration.add_argument('--changed-all-target',
                             metavar='TARGET',
                             default='all',
                             help='target to run when all tests are needed')

    integration.add_argument(
        '--list-targets',
        action='store_true',
        help='list matching targets instead of running tests')

    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True  # work-around for python 3 bug which makes subparsers optional

    posix_integration = subparsers.add_parser('integration',
                                              parents=[integration],
                                              help='posix integration tests')

    posix_integration.set_defaults(func=command_posix_integration,
                                   targets=walk_posix_integration_targets,
                                   config=PosixIntegrationConfig)

    add_extra_docker_options(posix_integration)

    network_integration = subparsers.add_parser(
        'network-integration',
        parents=[integration],
        help='network integration tests')

    network_integration.set_defaults(func=command_network_integration,
                                     targets=walk_network_integration_targets,
                                     config=NetworkIntegrationConfig)

    add_extra_docker_options(network_integration, integration=False)

    network_integration.add_argument(
        '--platform',
        metavar='PLATFORM',
        action='append',
        help='network platform/version').completer = complete_network_platform

    network_integration.add_argument('--inventory',
                                     metavar='PATH',
                                     help='path to inventory used for tests')

    network_integration.add_argument(
        '--testcase',
        metavar='TESTCASE',
        help='limit a test to a specified testcase'
    ).completer = complete_network_testcase

    windows_integration = subparsers.add_parser(
        'windows-integration',
        parents=[integration],
        help='windows integration tests')

    windows_integration.set_defaults(func=command_windows_integration,
                                     targets=walk_windows_integration_targets,
                                     config=WindowsIntegrationConfig)

    add_extra_docker_options(windows_integration, integration=False)

    windows_integration.add_argument(
        '--windows',
        metavar='VERSION',
        action='append',
        help='windows version').completer = complete_windows

    units = subparsers.add_parser('units', parents=[test], help='unit tests')

    units.set_defaults(func=command_units,
                       targets=walk_units_targets,
                       config=UnitsConfig)

    units.add_argument('--python',
                       metavar='VERSION',
                       choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                       help='python version: %s' %
                       ', '.join(SUPPORTED_PYTHON_VERSIONS))

    units.add_argument('--collect-only',
                       action='store_true',
                       help='collect tests but do not execute them')

    add_extra_docker_options(units, integration=False)

    sanity = subparsers.add_parser('sanity',
                                   parents=[test],
                                   help='sanity tests')

    sanity.set_defaults(func=command_sanity,
                        targets=walk_sanity_targets,
                        config=SanityConfig)

    sanity.add_argument('--test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to run').completer = complete_sanity_test

    sanity.add_argument('--skip-test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to skip').completer = complete_sanity_test

    sanity.add_argument('--list-tests',
                        action='store_true',
                        help='list available tests')

    sanity.add_argument('--python',
                        metavar='VERSION',
                        choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                        help='python version: %s' %
                        ', '.join(SUPPORTED_PYTHON_VERSIONS))

    sanity.add_argument('--base-branch', help=argparse.SUPPRESS)

    add_lint(sanity)
    add_extra_docker_options(sanity, integration=False)

    shell = subparsers.add_parser('shell',
                                  parents=[common],
                                  help='open an interactive shell')

    shell.set_defaults(func=command_shell, config=ShellConfig)

    add_environments(shell, tox_version=True)
    add_extra_docker_options(shell)

    coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])

    add_environments(coverage_common, tox_version=True, tox_only=True)

    coverage = subparsers.add_parser(
        'coverage', help='code coverage management and reporting')

    coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
    coverage_subparsers.required = True  # work-around for python 3 bug which makes subparsers optional

    coverage_combine = coverage_subparsers.add_parser(
        'combine',
        parents=[coverage_common],
        help='combine coverage data and rewrite remote paths')

    coverage_combine.set_defaults(func=lib.cover.command_coverage_combine,
                                  config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_combine)

    coverage_erase = coverage_subparsers.add_parser(
        'erase', parents=[coverage_common], help='erase coverage data files')

    coverage_erase.set_defaults(func=lib.cover.command_coverage_erase,
                                config=lib.cover.CoverageConfig)

    coverage_report = coverage_subparsers.add_parser(
        'report',
        parents=[coverage_common],
        help='generate console coverage report')

    coverage_report.set_defaults(func=lib.cover.command_coverage_report,
                                 config=lib.cover.CoverageReportConfig)

    coverage_report.add_argument(
        '--show-missing',
        action='store_true',
        help='show line numbers of statements not executed')

    add_extra_coverage_options(coverage_report)

    coverage_html = coverage_subparsers.add_parser(
        'html',
        parents=[coverage_common],
        help='generate html coverage report')

    coverage_html.set_defaults(func=lib.cover.command_coverage_html,
                               config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_html)

    coverage_xml = coverage_subparsers.add_parser(
        'xml', parents=[coverage_common], help='generate xml coverage report')

    coverage_xml.set_defaults(func=lib.cover.command_coverage_xml,
                              config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_xml)

    if argcomplete:
        argcomplete.autocomplete(parser,
                                 always_complete_options=False,
                                 validator=lambda i, k: True)

    args = parser.parse_args()

    if args.explain and not args.verbosity:
        args.verbosity = 1

    if args.color == 'yes':
        args.color = True
    elif args.color == 'no':
        args.color = False
    else:
        args.color = sys.stdout.isatty()

    return args
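The tri-state --color handling at the end of the example above is a reusable pattern: nargs='?' with const='yes' lets a bare --color mean yes, while the default 'auto' is resolved with a TTY check after parsing. A minimal standalone sketch (program name hypothetical):

import sys
import argparse

parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('--color', metavar='COLOR', nargs='?',
                    choices=('yes', 'no', 'auto'),
                    const='yes', default='auto',
                    help='generate color output: %(choices)s')
args = parser.parse_args()

# Resolve 'auto' into a boolean based on whether stdout is a terminal
if args.color == 'yes':
    args.color = True
elif args.color == 'no':
    args.color = False
else:
    args.color = sys.stdout.isatty()
print(args.color)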
Example #50
0
File: cli.py Project: mzha/HomewardBound
def _ArgComplete(self):
    argcomplete.autocomplete(self.__parser, always_complete_options=False)
Example #51
0
def res() -> None:
    from evo import main_res
    parser = main_res.parser()
    argcomplete.autocomplete(parser)
    launch(main_res, parser)
Example #52
0
def enable_autocomplete(parser):
    argcomplete.autocomplete = argcomplete.CompletionFinder()
    argcomplete.autocomplete(
        parser,
        validator=lambda c, p: c.lower().startswith(p.lower()),
        default_completer=lambda _: ())
Example #53
0
def main_entry(trinity_boot: BootFn, app_identifier: str,
               component_types: Tuple[Type[BaseComponentAPI], ...],
               sub_configs: Sequence[Type[BaseAppConfig]]) -> None:
    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    for component_cls in component_types:
        component_cls.configure_parser(parser, subparser)

    argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        parser.error(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet, ropsten or goerli, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, also you must specify a data "
            "directory with `--data-dir path/to/data/directory`")

    # The `common_log_level` is derived from `--log-level <Level>` / `-l <Level>` without
    # specifying any module. If present, it is used for both `stderr` and `file` logging.
    common_log_level = args.log_levels and args.log_levels.get(None)
    has_ambigous_logging_config = (
        (common_log_level is not None and args.stderr_log_level is not None)
        or (common_log_level is not None and args.file_log_level is not None))

    if has_ambigous_logging_config:
        parser.error(f"""\n
            Ambiguous logging configuration: The `--log-level (-l)` flag sets the
            log level for both file and stderr logging.
            To configure different log level for file and stderr logging,
            remove the `--log-level` flag and use `--stderr-log-level` and/or
            `--file-log-level` separately.
            Alternatively, remove the `--stderr-log-level` and/or `--file-log-level`
            flags to share one single log level across both handlers.
            """)

    try:
        trinity_config = TrinityConfig.from_parser_args(
            args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path")

    # +---------------+
    # | LOGGING SETUP |
    # +---------------+

    # Setup logging to stderr
    stderr_logger_level = (
        args.stderr_log_level if args.stderr_log_level is not None else
        (common_log_level if common_log_level is not None else logging.INFO))
    handler_stderr = setup_stderr_logging(stderr_logger_level)

    # Setup file based logging
    file_logger_level = (
        args.file_log_level if args.file_log_level is not None else
        (common_log_level if common_log_level is not None else logging.DEBUG))
    handler_file = setup_file_logging(trinity_config.logfile_path,
                                      file_logger_level)

    # Set the individual logger levels that have been specified.
    logger_levels = {} if args.log_levels is None else args.log_levels
    set_logger_levels(logger_levels)

    # get the root logger and set it to the level of the stderr logger.
    logger = logging.getLogger()
    logger.setLevel(stderr_logger_level)

    # This prints out the ASCII "trinity" header in the terminal
    display_launch_logs(trinity_config)

    # Setup the log listener which child processes relay their logs through
    log_listener = IPCListener(handler_stderr, handler_file)

    # Determine what logging level child processes should use.
    child_process_log_level = min(
        stderr_logger_level,
        file_logger_level,
        *logger_levels.values(),
    )

    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        child_process_log_level=child_process_log_level,
        logger_levels=logger_levels,
        profile=bool(args.profile),
    )

    # Let the components do runtime validation
    for component_cls in component_types:
        try:
            component_cls.validate_cli(boot_info)
        except ValidationError as exc:
            parser.exit(message=str(exc))

    # Components can provide a subcommand with a `func` which does then control
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
        return

    if hasattr(args, 'munge_func'):
        args.munge_func(args, trinity_config)

    runtime_component_types = tuple(component_cls
                                    for component_cls in component_types
                                    if issubclass(component_cls, ComponentAPI))

    with log_listener.run(trinity_config.logging_ipc_path):

        processes = trinity_boot(boot_info)

        loop = asyncio.get_event_loop()

        def kill_trinity_with_reason(reason: str) -> None:
            kill_trinity_gracefully(trinity_config,
                                    logger,
                                    processes,
                                    reason=reason)

        component_manager_service = ComponentManager(
            boot_info,
            runtime_component_types,
            kill_trinity_with_reason,
        )
        manager = AsyncioManager(component_manager_service)

        loop.add_signal_handler(
            signal.SIGTERM,
            manager.cancel,
            'SIGTERM',
        )
        loop.add_signal_handler(
            signal.SIGINT,
            component_manager_service.shutdown,
            'CTRL+C',
        )

        try:
            loop.run_until_complete(manager.run())
        except BaseException as err:
            logger.error("Error during trinity run: %r", err)
            raise
        finally:
            kill_trinity_with_reason(component_manager_service.reason)
            if trinity_config.trinity_tmp_root_dir:
                shutil.rmtree(trinity_config.trinity_root_dir)
Example #54
0
def traj() -> None:
    from evo import main_traj
    parser = main_traj.parser()
    argcomplete.autocomplete(parser)
    launch(main_traj, parser)
Example #55
0
        # chunk = np.column_stack((decompressed[0 : int(len(decompressed)/2)], decompressed[int(len(decompressed)/2)
        # : int(len(decompressed))]))

        # Return the sequence number and the original chunk
        return chunk_number, self.receiver_chunk_buffer


class Compression__verbose(Compression, buffer.Buffering__verbose):
    def __init__(self):
        super().__init__()


if __name__ == "__main__":
    buffer.minimal.parser.description = __doc__
    try:
        argcomplete.autocomplete(buffer.minimal.parser)
    except Exception:
        if __debug__:
            print("argcomplete not working :-/")
        else:
            pass
    buffer.minimal.args = buffer.minimal.parser.parse_known_args()[0]
    if buffer.minimal.args.show_stats or buffer.minimal.args.show_samples:
        intercom = Compression__verbose()
    else:
        intercom = Compression()
    try:
        intercom.run()
    except KeyboardInterrupt:
        buffer.minimal.parser.exit("\nInterrupted by user")
Example #56
0
def rpe() -> None:
    from evo import main_rpe
    parser = main_rpe.parser()
    argcomplete.autocomplete(parser)
    launch(main_rpe, parser)
Example #57
0
def main():
    parser = argparse.ArgumentParser(description='BigClown Firmware Tool')

    subparsers = {}
    subparser = parser.add_subparsers(dest='command', metavar='COMMAND')

    subparsers['update'] = subparser.add_parser(
        'update', help="update list of available firmware")

    subparsers['list'] = subparser.add_parser('list', help="list firmware")
    subparsers['list'].add_argument('--all',
                                    help='show all releases',
                                    action='store_true')
    subparsers['list'].add_argument('--description',
                                    help='show description',
                                    action='store_true')
    subparsers['list'].add_argument('--show-pre-release',
                                    help='show pre-release version',
                                    action='store_true')

    subparsers['flash'] = subparser.add_parser(
        'flash',
        help="flash firmware",
        usage=
        '%(prog)s\n       %(prog)s <firmware>\n       %(prog)s <file>\n       %(prog)s <url>'
    )
    subparsers['flash'].add_argument(
        'what', help=argparse.SUPPRESS, nargs='?',
        default="firmware.bin").completer = FirmwareChoicesCompleter(True)
    subparsers['flash'].add_argument('--device',
                                     help='device',
                                     required='--dfu' not in sys.argv)
    group = subparsers['flash'].add_mutually_exclusive_group()
    group.add_argument('--dfu', help='use dfu mode', action='store_true')
    group.add_argument('--log', help='run log', action='store_true')
    group_log = subparsers['flash'].add_argument_group(
        'optional for --log arguments')
    log.add_arguments(group_log)
    subparsers['flash'].add_argument('--erase-eeprom',
                                     help='erase eeprom',
                                     action='store_true')

    subparsers['devices'] = subparser.add_parser('devices',
                                                 help="show devices")
    subparsers['devices'].add_argument('-v',
                                       '--verbose',
                                       action='store_true',
                                       help='show more messages')
    subparsers['devices'].add_argument(
        '-s',
        '--include-links',
        action='store_true',
        help='include entries that are symlinks to real devices'
        if pyserial_34 else argparse.SUPPRESS)

    subparsers['search'] = subparser.add_parser(
        'search', help="search in firmware names and descriptions")
    subparsers['search'].add_argument('pattern', help='search pattern')
    subparsers['search'].add_argument('--all',
                                      help='show all releases',
                                      action='store_true')
    subparsers['search'].add_argument('--description',
                                      help='show description',
                                      action='store_true')
    subparsers['search'].add_argument('--show-pre-release',
                                      help='show pre-release version',
                                      action='store_true')

    subparsers['pull'] = subparser.add_parser(
        'pull',
        help="pull firmware to cache",
        usage='%(prog)s <firmware>\n       %(prog)s <url>')
    subparsers['pull'].add_argument(
        'what',
        help=argparse.SUPPRESS).completer = FirmwareChoicesCompleter(False)

    subparsers['clean'] = subparser.add_parser('clean', help="clean cache")

    subparsers['create'] = subparser.add_parser('create',
                                                help="create new firmware")
    subparsers['create'].add_argument('name', help=argparse.SUPPRESS)
    subparsers['create'].add_argument('--no-git',
                                      help='disable git',
                                      action='store_true')

    subparsers['read'] = subparser.add_parser('read',
                                              help="download firmware to file")
    subparsers['read'].add_argument('filename', help=argparse.SUPPRESS)
    subparsers['read'].add_argument('--device', help='device', required=True)
    subparsers['read'].add_argument('--length',
                                    help='length',
                                    default=196608,
                                    type=int)

    subparsers['log'] = subparser.add_parser('log', help="show log")
    subparsers['log'].add_argument('--device', help='device', required=True)
    log.add_arguments(subparsers['log'])

    subparsers['reset'] = subparser.add_parser(
        'reset', help="reset core module (does not work for r1.3)")
    subparsers['reset'].add_argument('--device', help='device', required=True)
    subparsers['reset'].add_argument('--log',
                                     help='run log',
                                     action='store_true')
    group_log = subparsers['reset'].add_argument_group(
        'optional for --log arguments')
    log.add_arguments(group_log)

    subparsers['eeprom'] = subparser.add_parser('eeprom', help="eeprom")
    subparsers['eeprom'].add_argument('--device',
                                      help='device',
                                      required='--dfu' not in sys.argv)
    subparsers['eeprom'].add_argument('--dfu',
                                      help='use dfu mode',
                                      action='store_true')
    group = subparsers['eeprom'].add_mutually_exclusive_group()
    group.add_argument('--erase', help='erase', action='store_true')

    subparser_help = subparser.add_parser('help', help="show help")
    subparser_help.add_argument('what',
                                help=argparse.SUPPRESS,
                                nargs='?',
                                choices=subparsers.keys())
    subparser_help.add_argument('--all',
                                help='show help for all commands',
                                action='store_true')

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit()

    if args.command == 'help':
        if args.what:
            subparsers[args.what].print_help()
        else:
            parser.print_help()
            print("  --all          show help for all commands")
            if args.all:
                print("=" * 60 + os.linesep)
                for subparser in subparser.choices:
                    if subparser in subparsers:
                        subparsers[subparser].print_help()
                        print(os.linesep)
        sys.exit()

    fwlist = FirmwareList(user_cache_dir)

    if args.command == 'list' or args.command == 'search':
        # labels = ['Name:Bin:Version']
        # if args.description:
        #     labels.append('description')

        rows = fwlist.get_firmware_table(
            search=args.pattern if args.command == 'search' else None,
            all=args.all,
            description=args.description,
            show_pre_release=args.show_pre_release)

        if rows:
            print_table([], rows)
        elif args.command == 'list':
            print('Nothing found, try updating first')
        else:
            print('Nothing found')

    elif args.command == 'flash':
        test_log_argumensts(args, subparsers['flash'])
        command_flash(args, fwlist)

    elif args.command == 'update':
        fwlist.update()

    elif args.command == 'devices':
        command_devices(verbose=args.verbose, include_links=args.include_links)

    elif args.command == 'pull':
        if args.what == 'last':
            for name in fwlist.get_firmware_list():
                firmware = fwlist.get_firmware(name)
                print('pull', name)
                download_url(firmware['url'], True)
                print()

        elif args.what.startswith('http'):
            download_url(args.what, True)
        else:
            firmware = fwlist.get_firmware(args.what)
            if not firmware:
                print(
                    'Firmware not found, try updating first, command: bcf update'
                )
                sys.exit(1)
            download_url(firmware['url'], True)

    elif args.command == 'clean':
        fwlist.clear()
        for filename in os.listdir(user_cache_dir):
            os.unlink(os.path.join(user_cache_dir, filename))

    elif args.command == 'create':
        name = args.name

        if os.path.exists(name):
            print('Directory already exists')
            sys.exit(1)

        skeleton_zip_filename = download_url(SKELETON_URL_ZIP)
        print()

        tmp_dir = tempfile.mkdtemp()

        zip_ref = zipfile.ZipFile(skeleton_zip_filename, 'r')
        zip_ref.extractall(tmp_dir)
        zip_ref.close()

        skeleton_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
        shutil.move(skeleton_path, name)

        os.rmdir(os.path.join(name, 'sdk'))
        os.rmdir(os.path.join(name, '.vscode'))
        os.unlink(os.path.join(name, '.gitmodules'))

        os.chdir(name)

        if args.no_git:
            sdk_zip_filename = download_url(SDK_URL_ZIP)
            zip_ref = zipfile.ZipFile(sdk_zip_filename, 'r')
            zip_ref.extractall(tmp_dir)
            zip_ref.close()
            sdk_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
            shutil.move(sdk_path, 'sdk')

            sdk_zip_filename = download_url(VSCODE_URL_ZIP)
            zip_ref = zipfile.ZipFile(sdk_zip_filename, 'r')
            zip_ref.extractall(tmp_dir)
            zip_ref.close()
            sdk_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
            shutil.move(sdk_path, '.vscode')

        else:
            os.system('git init')
            os.system('git submodule add --depth 1 "' + SDK_GIT + '" sdk')
            os.system('git submodule add --depth 1 "' + VSCODE_GIT +
                      '" .vscode')

        os.rmdir(tmp_dir)

    elif args.command == 'read':
        flasher.uart.clone(args.device,
                           args.filename,
                           args.length,
                           reporthook=print_progress_bar)

    elif args.command == 'log':
        log.run_args(args)

    elif args.command == 'reset':
        test_log_argumensts(args, subparsers['reset'])
        command_reset(args)

    elif args.command == 'eeprom':
        command_eeprom(args)
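The .completer = FirmwareChoicesCompleter(...) assignments above rely on argcomplete's completer protocol: a completer is any callable that accepts keyword arguments such as prefix, action, parser and parsed_args and returns an iterable of candidate strings. A minimal hypothetical completer in that style (the real FirmwareChoicesCompleter in the project above may differ, and argcomplete already ships a similar argcomplete.completers.ChoicesCompleter):

class StaticChoicesCompleter:
    """Offer completions from a fixed list of choices (hypothetical sketch)."""

    def __init__(self, choices):
        self.choices = list(choices)

    def __call__(self, prefix, parsed_args, **kwargs):
        # argcomplete invokes completers with keyword arguments and expects
        # an iterable of candidate strings in return
        return [c for c in self.choices if c.startswith(prefix)]

# usage (hypothetical):
#   parser.add_argument('what').completer = StaticChoicesCompleter(['firmware-a', 'firmware-b'])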
Example #58
0
File: main.py Project: srinivas32/yotta
def main():
    # standard library modules, , ,
    import logging
    from functools import reduce

    # logging setup, , setup the logging system, internal
    from yotta.lib import logging_setup
    # options, , common argument parser options, internal
    import yotta.options as options

    logging_setup.init(level=logging.INFO, enable_subsystems=None, plain=False)

    # we override many argparse things to make options more re-usable across
    # subcommands, and allow lazy loading of subcommand modules:
    parser = options.parser.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Build software using re-usable components.\n'+
        'For more detailed help on each subcommand, run: yotta <subcommand> --help'
    )
    subparser = parser.add_subparsers(dest='subcommand_name', metavar='<subcommand>')

    parser.add_argument('--version', action='version', version=__version__,
        help='display the version'
    )

    # add re-usable top-level options which subcommands may also accept
    options.verbosity.addTo(parser)
    options.debug.addTo(parser)
    options.plain.addTo(parser)
    options.noninteractive.addTo(parser)
    options.registry.addTo(parser)
    options.target.addTo(parser)
    options.config.addTo(parser)

    def addParser(name, module_name, description, help=None):
        if help is None:
            help = description
        def onParserAdded(parser):
            import importlib
            module = importlib.import_module('.' + module_name, 'yotta')
            module.addOptions(parser)
            parser.set_defaults(command=module.execCommand)
        subparser.add_parser_async(
            name, description=description, help=help,
            formatter_class=argparse.RawTextHelpFormatter,
            callback=onParserAdded
        )

    addParser('search', 'search',
        'Search for open-source modules and targets that have been published '+
        'to the yotta registry (with yotta publish). See help for `yotta '+
        'install` for installing modules, and for `yotta target` for '+
        'switching targets.',
        'Search for published modules and targets'
    )
    addParser('init', 'init', 'Create a new module.')
    addParser('install', 'install',
        'Add a specific module as a dependency, and download it, or install all '+
        'dependencies for the current module. Use yotta install '+
        'modulename@version to install a specific version.'
    )
    addParser('build', 'build',
        'Build the current module. Options can be passed to the underlying '+
        'build tool by passing them after --, e.g. to do a verbose build '+
        'which will display each command as it is run, use:\n'+
        '  yotta build -- -v\n\n'+
        'The programs or libraries to build can be specified (by default '+
        'only the libraries needed by the current module and the current '+
        "module's own tests are built). For example, to build the tests of "+
        'all dependencies, run:\n  yotta build all_tests\n\n',
        'Build the current module.'
    )
    addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.')
    addParser('link', 'link',
        'Symlink a module to be used into another module.\n\n'+
        'Use: "yotta link" in a module to link it globally, then use "yotta '+
        'link <modulename>" to link it into the module where you want to use '+
        'it.\n\n'+
        '"yotta link ../path/to/module" is also supported, which will create '+
        'the global link and a link into the current module in a single step.',
        'Symlink a module'
    )
    addParser('link-target', 'link_target',
        'Symlink a target to be used into another module.\n\n'+
        'Use: "yotta link" in a target to link it globally, then use "yotta '+
        'link-target <targetname>" to link it into the module where you want to use '+
        'it.\n\n'+
        '"yotta link ../path/to/target" is also supported, which will create '+
        'the global link and a link into the current module in a single step.',
        'Symlink a target'
    )
    addParser('update', 'update', 'Update dependencies for the current module, or a specific module.')
    addParser('target', 'target', 'Set or display the target device.')
    addParser('debug', 'debug', 'Attach a debugger to the current target.  Requires target support.')
    addParser('test', 'test_subcommand',
        'Run the tests for the current module on the current target. A build '+
        'will be run first, and options to the build subcommand are also '+
        'accepted by test.\nThis subcommand requires the target to provide a '+
        '"test" script that will be used to run each test. Modules may also '+
        'define a "testReporter" script, which will be piped the output from '+
        'each test, and may produce a summary.',
        'Run the tests for the current module on the current target. Requires target support for cross-compiling targets.'
    )
    addParser('start', 'start',
        'Launch the compiled program (available for executable modules only). Requires target support for cross-compiling targets.'
    )
    addParser('publish', 'publish', 'Publish a module or target to the public registry.')
    addParser('unpublish', 'unpublish', 'Un-publish a recently published module or target.')
    addParser('login', 'login', 'Authorize for access to private github repositories and publishing to the yotta registry.')
    addParser('logout', 'logout', 'Remove saved authorization token for the current user.')
    addParser('whoami', 'whoami', 'Display who the currently logged in user is (if any).')
    addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.')
    addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.')
    addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.')
    addParser('remove', 'remove',
        'Remove the downloaded version of a dependency module or target, or '+
        'un-link a linked module or target (see yotta link --help for details '+
        'of linking). This command does not modify your module.json file.',
        'Remove or unlink a dependency without removing it from module.json.'
    )
    addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.')
    addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.')
    addParser('clean', 'clean', 'Remove files created by yotta and the build.')
    addParser('config', 'config', 'Display the target configuration info.')
    addParser('shrinkwrap', 'shrinkwrap', 'Create a yotta-shrinkwrap.json file to freeze dependency versions.')

    # short synonyms, subparser.choices is a dictionary, so use update() to
    # merge in the keys from another dictionary
    short_commands = {
                'up':subparser.choices['update'],
                'in':subparser.choices['install'],
                'un':subparser.choices['uninstall'],
                'ln':subparser.choices['link'],
                 'v':subparser.choices['version'],
                'ls':subparser.choices['list'],
                'rm':subparser.choices['remove'],
            'unlink':subparser.choices['remove'],
     'unlink-target':subparser.choices['remove'],
             'owner':subparser.choices['owners'],
              'lics':subparser.choices['licenses'],
               'who':subparser.choices['whoami'],
               'run':subparser.choices['start']
    }
    subparser.choices.update(short_commands)

    # split the args into those before and after any '--'
    # argument - subcommands get raw access to arguments following '--', and
    # may pass them on to (for example) the build tool being used
    split_args = splitList(sys.argv, '--')
    following_args = reduce(lambda x,y: x + ['--'] + y, split_args[1:], [])[1:]

    # complete all the things :)
    argcomplete.autocomplete(
         parser,
        exclude = list(short_commands.keys()) + ['-d', '--debug', '-v', '--verbose']
    )

    # when args are passed directly we need to strip off the program name
    # (hence [:1])
    args = parser.parse_args(split_args[0][1:])

    # set global arguments that are shared everywhere and never change
    globalconf.set('interactive', args.interactive)
    globalconf.set('plain', args.plain)

    # finally, do stuff!
    if 'command' not in args:
        parser.print_usage()
        sys.exit(0)

    try:
        status = args.command(args, following_args)
    except KeyboardInterrupt:
        logging.warning('interrupted')
        status = -1
    except Exception as e:
        logging.error(e)
        status = -1

    sys.exit(status or 0)
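The '--' handling above (splitList plus the reduce expression) can be illustrated with a small self-contained sketch; split_list here is a stand-in for yotta's own splitList helper, which is not shown in this example:

from functools import reduce

def split_list(items, separator):
    """Split a list into sub-lists on every occurrence of separator."""
    chunks = [[]]
    for item in items:
        if item == separator:
            chunks.append([])
        else:
            chunks[-1].append(item)
    return chunks

# Arguments before the first '--' go to argparse; everything after is passed
# through to the underlying build tool untouched.
argv = ['yotta', 'build', '--', '-v', '--', '-j8']
split_args = split_list(argv, '--')
following_args = reduce(lambda x, y: x + ['--'] + y, split_args[1:], [])[1:]
print(split_args[0])      # ['yotta', 'build']
print(following_args)     # ['-v', '--', '-j8']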
Example #59
0
def parse_args():
    """Parse command line arguments."""
    try:
        import argparse
    except ImportError:
        if '--requirements' not in sys.argv:
            raise
        # install argparse without using constraints since pip may be too old to support them
        # not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
        # argparse has no special requirements, so upgrading pip is not required here
        raw_command(
            generate_pip_install(generate_pip_command(sys.executable),
                                 '',
                                 packages=['argparse'],
                                 use_constraints=False))
        import argparse

    try:
        import argcomplete
    except ImportError:
        argcomplete = None

    if argcomplete:
        epilog = 'Tab completion available using the "argcomplete" python package.'
    else:
        epilog = 'Install the "argcomplete" python package to enable tab completion.'

    def key_value_type(value):  # type: (str) -> t.Tuple[str, str]
        """Wrapper around key_value."""
        return key_value(argparse, value)

    parser = argparse.ArgumentParser(epilog=epilog)

    common = argparse.ArgumentParser(add_help=False)

    common.add_argument('-e',
                        '--explain',
                        action='store_true',
                        help='explain commands that would be executed')

    common.add_argument('-v',
                        '--verbose',
                        dest='verbosity',
                        action='count',
                        default=0,
                        help='display more output')

    common.add_argument('--color',
                        metavar='COLOR',
                        nargs='?',
                        help='generate color output: %(choices)s',
                        choices=('yes', 'no', 'auto'),
                        const='yes',
                        default='auto')

    common.add_argument('--debug',
                        action='store_true',
                        help='run ansible commands in debug mode')

    # noinspection PyTypeChecker
    common.add_argument(
        '--truncate',
        dest='truncate',
        metavar='COLUMNS',
        type=int,
        default=display.columns,
        help='truncate some long output (0=disabled) (default: auto)')

    common.add_argument('--redact',
                        dest='redact',
                        action='store_true',
                        default=True,
                        help='redact sensitive values in output')

    common.add_argument('--no-redact',
                        dest='redact',
                        action='store_false',
                        default=False,
                        help='show sensitive values in output')

    common.add_argument('--check-python',
                        choices=SUPPORTED_PYTHON_VERSIONS,
                        help=argparse.SUPPRESS)

    test = argparse.ArgumentParser(add_help=False, parents=[common])

    test.add_argument(
        'include',
        metavar='TARGET',
        nargs='*',
        help='test the specified target').completer = complete_target

    test.add_argument(
        '--include',
        metavar='TARGET',
        action='append',
        help='include the specified target').completer = complete_target

    test.add_argument(
        '--exclude',
        metavar='TARGET',
        action='append',
        help='exclude the specified target').completer = complete_target

    test.add_argument(
        '--require',
        metavar='TARGET',
        action='append',
        help='require the specified target').completer = complete_target

    test.add_argument('--coverage',
                      action='store_true',
                      help='analyze code coverage when running tests')

    test.add_argument('--coverage-label',
                      default='',
                      help='label to include in coverage output file names')

    test.add_argument('--coverage-check',
                      action='store_true',
                      help='only verify code coverage can be enabled')

    test.add_argument('--metadata', help=argparse.SUPPRESS)

    test.add_argument('--base-branch',
                      help='base branch used for change detection')

    add_changes(test, argparse)
    add_environments(test)

    integration = argparse.ArgumentParser(add_help=False, parents=[test])

    integration.add_argument('--python',
                             metavar='VERSION',
                             choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                             help='python version: %s' %
                             ', '.join(SUPPORTED_PYTHON_VERSIONS))

    integration.add_argument(
        '--start-at', metavar='TARGET',
        help='start at the specified target').completer = complete_target

    integration.add_argument('--start-at-task',
                             metavar='TASK',
                             help='start at the specified task')

    integration.add_argument(
        '--tags',
        metavar='TAGS',
        help='only run plays and tasks tagged with these values')

    integration.add_argument(
        '--skip-tags',
        metavar='TAGS',
        help='only run plays and tasks whose tags do not match these values')

    integration.add_argument('--diff',
                             action='store_true',
                             help='show diff output')

    integration.add_argument('--allow-destructive',
                             action='store_true',
                             help='allow destructive tests')

    integration.add_argument('--allow-root',
                             action='store_true',
                             help='allow tests requiring root when not root')

    integration.add_argument(
        '--allow-disabled',
        action='store_true',
        help='allow tests which have been marked as disabled')

    integration.add_argument(
        '--allow-unstable',
        action='store_true',
        help='allow tests which have been marked as unstable')

    integration.add_argument(
        '--allow-unstable-changed',
        action='store_true',
        help=
        'allow tests which have been marked as unstable when focused changes are detected'
    )

    integration.add_argument(
        '--allow-unsupported',
        action='store_true',
        help='allow tests which have been marked as unsupported')

    integration.add_argument('--retry-on-error',
                             action='store_true',
                             help='retry failed test with increased verbosity')

    integration.add_argument('--continue-on-error',
                             action='store_true',
                             help='continue after failed test')

    integration.add_argument(
        '--debug-strategy',
        action='store_true',
        help='run test playbooks using the debug strategy')

    integration.add_argument('--changed-all-target',
                             metavar='TARGET',
                             default='all',
                             help='target to run when all tests are needed')

    integration.add_argument(
        '--changed-all-mode',
        metavar='MODE',
        choices=('default', 'include', 'exclude'),
        help='include/exclude behavior with --changed-all-target: %(choices)s')

    integration.add_argument(
        '--list-targets',
        action='store_true',
        help='list matching targets instead of running tests')

    integration.add_argument(
        '--no-temp-workdir',
        action='store_true',
        help=
        'do not run tests from a temporary directory (use only for verifying broken tests)'
    )

    integration.add_argument(
        '--no-temp-unicode',
        action='store_true',
        help=
        'avoid unicode characters in temporary directory (use only for verifying broken tests)'
    )

    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True  # work-around for python 3 bug which makes subparsers optional

    posix_integration = subparsers.add_parser('integration',
                                              parents=[integration],
                                              help='posix integration tests')

    posix_integration.set_defaults(func=command_posix_integration,
                                   targets=walk_posix_integration_targets,
                                   config=PosixIntegrationConfig)

    add_extra_docker_options(posix_integration)
    add_httptester_options(posix_integration, argparse)

    network_integration = subparsers.add_parser(
        'network-integration',
        parents=[integration],
        help='network integration tests')

    network_integration.set_defaults(func=command_network_integration,
                                     targets=walk_network_integration_targets,
                                     config=NetworkIntegrationConfig)

    add_extra_docker_options(network_integration, integration=False)

    network_integration.add_argument(
        '--platform',
        metavar='PLATFORM',
        action='append',
        help='network platform/version').completer = complete_network_platform

    network_integration.add_argument(
        '--platform-collection',
        type=key_value_type,
        metavar='PLATFORM=COLLECTION',
        action='append',
        help='collection used to test platform'
    ).completer = complete_network_platform_collection

    network_integration.add_argument(
        '--platform-connection',
        type=key_value_type,
        metavar='PLATFORM=CONNECTION',
        action='append',
        help='connection used to test platform'
    ).completer = complete_network_platform_connection

    network_integration.add_argument('--inventory',
                                     metavar='PATH',
                                     help='path to inventory used for tests')

    network_integration.add_argument(
        '--testcase',
        metavar='TESTCASE',
        help='limit a test to a specified testcase'
    ).completer = complete_network_testcase

    windows_integration = subparsers.add_parser(
        'windows-integration',
        parents=[integration],
        help='windows integration tests')

    windows_integration.set_defaults(func=command_windows_integration,
                                     targets=walk_windows_integration_targets,
                                     config=WindowsIntegrationConfig)

    add_extra_docker_options(windows_integration, integration=False)
    add_httptester_options(windows_integration, argparse)

    windows_integration.add_argument(
        '--windows',
        metavar='VERSION',
        action='append',
        help='windows version').completer = complete_windows

    windows_integration.add_argument('--inventory',
                                     metavar='PATH',
                                     help='path to inventory used for tests')

    units = subparsers.add_parser('units', parents=[test], help='unit tests')

    units.set_defaults(func=command_units,
                       targets=walk_units_targets,
                       config=UnitsConfig)

    units.add_argument('--python',
                       metavar='VERSION',
                       choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                       help='python version: %s' %
                       ', '.join(SUPPORTED_PYTHON_VERSIONS))

    units.add_argument('--collect-only',
                       action='store_true',
                       help='collect tests but do not execute them')

    # noinspection PyTypeChecker
    units.add_argument('--num-workers',
                       type=int,
                       help='number of workers to use (default: auto)')

    units.add_argument('--requirements-mode',
                       choices=('only', 'skip'),
                       help=argparse.SUPPRESS)

    add_extra_docker_options(units, integration=False)

    sanity = subparsers.add_parser('sanity',
                                   parents=[test],
                                   help='sanity tests')

    sanity.set_defaults(func=command_sanity,
                        targets=walk_sanity_targets,
                        config=SanityConfig)

    sanity.add_argument('--test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to run').completer = complete_sanity_test

    sanity.add_argument('--skip-test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to skip').completer = complete_sanity_test

    sanity.add_argument(
        '--allow-disabled',
        action='store_true',
        help='allow tests to run which are disabled by default')

    sanity.add_argument('--list-tests',
                        action='store_true',
                        help='list available tests')

    sanity.add_argument('--python',
                        metavar='VERSION',
                        choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                        help='python version: %s' %
                        ', '.join(SUPPORTED_PYTHON_VERSIONS))

    sanity.add_argument('--enable-optional-errors',
                        action='store_true',
                        help='enable optional errors')

    add_lint(sanity)
    add_extra_docker_options(sanity, integration=False)

    shell = subparsers.add_parser('shell',
                                  parents=[common],
                                  help='open an interactive shell')

    shell.add_argument('--python',
                       metavar='VERSION',
                       choices=SUPPORTED_PYTHON_VERSIONS + ('default', ),
                       help='python version: %s' %
                       ', '.join(SUPPORTED_PYTHON_VERSIONS))

    shell.set_defaults(func=command_shell, config=ShellConfig)

    shell.add_argument('--raw',
                       action='store_true',
                       help='direct to shell with no setup')

    add_environments(shell)
    add_extra_docker_options(shell)
    add_httptester_options(shell, argparse)

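    # Shared parent parser for the coverage subcommands below; add_help=False
    # avoids a duplicate -h/--help when it is reused via parents=[...].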
    coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])

    add_environments(coverage_common, isolated_delegation=False)

    coverage = subparsers.add_parser(
        'coverage', help='code coverage management and reporting')

    coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
    coverage_subparsers.required = True  # work-around for python 3 bug which makes subparsers optional

    add_coverage_analyze(coverage_subparsers, coverage_common)

    coverage_combine = coverage_subparsers.add_parser(
        'combine',
        parents=[coverage_common],
        help='combine coverage data and rewrite remote paths')

    coverage_combine.set_defaults(func=command_coverage_combine,
                                  config=CoverageConfig)

    add_extra_coverage_options(coverage_combine)

    coverage_erase = coverage_subparsers.add_parser(
        'erase', parents=[coverage_common], help='erase coverage data files')

    coverage_erase.set_defaults(func=command_coverage_erase,
                                config=CoverageConfig)

    coverage_report = coverage_subparsers.add_parser(
        'report',
        parents=[coverage_common],
        help='generate console coverage report')

    coverage_report.set_defaults(func=command_coverage_report,
                                 config=CoverageReportConfig)

    coverage_report.add_argument(
        '--show-missing',
        action='store_true',
        help='show line numbers of statements not executed')
    coverage_report.add_argument(
        '--include',
        metavar='PAT1,PAT2,...',
        help='include only files whose paths match one of these '
        'patterns. Accepts shell-style wildcards, which must be '
        'quoted.')
    coverage_report.add_argument(
        '--omit',
        metavar='PAT1,PAT2,...',
        help='omit files whose paths match one of these patterns. '
        'Accepts shell-style wildcards, which must be quoted.')

    add_extra_coverage_options(coverage_report)

    coverage_html = coverage_subparsers.add_parser(
        'html',
        parents=[coverage_common],
        help='generate html coverage report')

    coverage_html.set_defaults(func=command_coverage_html,
                               config=CoverageConfig)

    add_extra_coverage_options(coverage_html)

    coverage_xml = coverage_subparsers.add_parser(
        'xml', parents=[coverage_common], help='generate xml coverage report')

    coverage_xml.set_defaults(func=command_coverage_xml, config=CoverageConfig)

    add_extra_coverage_options(coverage_xml)

    env = subparsers.add_parser(
        'env',
        parents=[common],
        help='show information about the test environment')

    env.set_defaults(func=command_env, config=EnvConfig)

    env.add_argument('--show',
                     action='store_true',
                     help='show environment on stdout')

    env.add_argument('--dump',
                     action='store_true',
                     help='dump environment to disk')

    env.add_argument('--list-files',
                     action='store_true',
                     help='list files on stdout')

    # noinspection PyTypeChecker
    env.add_argument(
        '--timeout',
        type=int,
        metavar='MINUTES',
        help='timeout for future ansible-test commands (0 clears)')

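    # Optional bash completion: always_complete_options=False keeps option
    # strings out of the suggestions until a dash is typed, and the permissive
    # validator accepts whatever the custom completers return instead of
    # applying argcomplete's default prefix filtering.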
    if argcomplete:
        argcomplete.autocomplete(parser,
                                 always_complete_options=False,
                                 validator=lambda i, k: True)

    args = parser.parse_args()

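    # Post-parse normalization: --explain implies at least -v, and the
    # tri-state --color value becomes a bool, falling back to tty detection.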
    if args.explain and not args.verbosity:
        args.verbosity = 1

    if args.color == 'yes':
        args.color = True
    elif args.color == 'no':
        args.color = False
    else:
        args.color = sys.stdout.isatty()

    return args
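
The pattern used throughout this parser is to attach a completer callable to the object returned by add_argument() and then hand the finished parser to argcomplete.autocomplete(). Below is a minimal, self-contained sketch of the same pattern; the program name, option, completer, and candidate values are illustrative and not taken from ansible-test.

# PYTHON_ARGCOMPLETE_OK
from argparse import ArgumentParser

try:
    import argcomplete
except ImportError:
    argcomplete = None


def complete_windows_version(prefix, parsed_args, **kwargs):
    # Return candidate values; argcomplete calls completers with keyword
    # arguments such as prefix, action, parser and parsed_args.
    return [v for v in ('2012', '2016', '2019', '2022') if v.startswith(prefix)]


def parse_args():
    parser = ArgumentParser(prog='example-test')
    parser.add_argument('--windows', metavar='VERSION',
                        action='append').completer = complete_windows_version

    if argcomplete:
        # Only suggest option strings once the user has typed a dash.
        argcomplete.autocomplete(parser, always_complete_options=False)

    return parser.parse_args()
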
Example #60
0
def rlaunch():

    m_description = 'This program launches one or more Rockets. A Rocket retrieves a job from the ' \
                    'central database and runs it. The "single-shot" option launches a single Rocket, ' \
                    'whereas the "rapidfire" option loops until all FireWorks are completed.'

    parser = ArgumentParser(description=m_description)
    subparsers = parser.add_subparsers(help='command', dest='command')
    single_parser = subparsers.add_parser('singleshot', help='launch a single Rocket')
    rapid_parser = subparsers.add_parser('rapidfire',
                                         help='launch multiple Rockets (loop until all FireWorks complete)')
    multi_parser = subparsers.add_parser('multi',
                                         help='launches multiple Rockets simultaneously')

    single_parser.add_argument('-f', '--fw_id', help='specific fw_id to run', default=None, type=int)
    single_parser.add_argument('--offline', help='run in offline mode (FW.json required)', action='store_true')
    single_parser.add_argument('--pdb', help='shortcut to invoke debugger on error', action='store_true')

    rapid_parser.add_argument('--nlaunches', help='num_launches (int or "infinite"; '
                                                  'default 0 is all jobs in DB)', default=0)
    rapid_parser.add_argument('--timeout', help='timeout (secs) after which to quit (default None)',
                              default=None, type=int)
    rapid_parser.add_argument('--max_loops', help='after this many sleep loops, quit even in '
                                                  'infinite nlaunches mode (default -1 is infinite loops)',
                              default=-1, type=int)
    rapid_parser.add_argument('--sleep', help='sleep time between loops (secs)', default=None,
                              type=int)
    rapid_parser.add_argument('--local_redirect', help="Redirect stdout and stderr to the launch directory",
                              action="store_true")

    multi_parser.add_argument('num_jobs', help='the number of jobs to run in parallel', type=int)
    multi_parser.add_argument('--nlaunches', help='number of FireWorks to run in series per '
                                                  'parallel job (int or "infinite"; default 0 is '
                                                  'all jobs in DB)',
                              default=0)
    multi_parser.add_argument('--sleep', help='sleep time between loops in infinite launch mode '
                                              '(secs)',
                              default=None, type=int)
    multi_parser.add_argument('--timeout', help='timeout (secs) after which to quit (default None)',
                              default=None, type=int)
    multi_parser.add_argument('--nodefile', help='nodefile name or environment variable name '
                                                 'containing the node file name (for populating'
                                                 ' FWData only)',
                              default=None, type=str)
    multi_parser.add_argument('--ppn', help='processors per node (for populating FWData only)',
                              default=1, type=int)
    multi_parser.add_argument('--exclude_current_node', help="Don't use the script-launching node "
                                                             "as a compute node",
                              action="store_true")
    multi_parser.add_argument('--local_redirect', help="Redirect stdout and stderr to the launch directory",
                              action="store_true")

    parser.add_argument('-l', '--launchpad_file', help='path to launchpad file')
    parser.add_argument('-w', '--fworker_file', help='path to fworker file')
    parser.add_argument('-c', '--config_dir', help='path to a directory containing the config file '
                                                   '(used if -l, -w unspecified)',
                        default=CONFIG_FILE_DIR)

    parser.add_argument('--loglvl', help='level to print log messages', default='INFO')
    parser.add_argument('-s', '--silencer', help='shortcut to mute log messages', action='store_true')

    try:
        import argcomplete
        argcomplete.autocomplete(parser)
        # This supports bash autocompletion. To enable this, pip install
        # argcomplete, activate global completion, or add
        #      eval "$(register-python-argcomplete rlaunch)"
        # into your .bash_profile or .bashrc
    except ImportError:
        pass

    args = parser.parse_args()

    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C

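    # Resolve config files: an explicit -l/-w path wins, then my_launchpad.yaml /
    # my_fworker.yaml inside --config_dir, then the module-level defaults
    # (LAUNCHPAD_LOC / FWORKER_LOC).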
    if not args.launchpad_file and os.path.exists(os.path.join(args.config_dir, 'my_launchpad.yaml')):
        args.launchpad_file = os.path.join(args.config_dir, 'my_launchpad.yaml')
    elif not args.launchpad_file:
        args.launchpad_file = LAUNCHPAD_LOC

    if not args.fworker_file and os.path.exists(os.path.join(args.config_dir, 'my_fworker.yaml')):
        args.fworker_file = os.path.join(args.config_dir, 'my_fworker.yaml')
    elif not args.fworker_file:
        args.fworker_file = FWORKER_LOC

    args.loglvl = 'CRITICAL' if args.silencer else args.loglvl

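    # Offline singleshot runs read a local FW.json, so no LaunchPad is created.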
    if args.command == 'singleshot' and args.offline:
        launchpad = None
    else:
        launchpad = LaunchPad.from_file(args.launchpad_file) if args.launchpad_file else LaunchPad(
            strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    # prime addr lookups
    _log = get_fw_logger("rlaunch", stream_level="INFO")
    _log.info("Hostname/IP lookup (this will take a few seconds)")
    get_my_host()
    get_my_ip()

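    # Dispatch on the chosen subcommand; 'singleshot' is handled in the else branch.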
    if args.command == 'rapidfire':
        rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=args.nlaunches,
                  max_loops=args.max_loops, sleep_time=args.sleep, strm_lvl=args.loglvl,
                  timeout=args.timeout, local_redirect=args.local_redirect)
    elif args.command == 'multi':
        total_node_list = None
        if args.nodefile:
            if args.nodefile in os.environ:
                args.nodefile = os.environ[args.nodefile]
            with open(args.nodefile, 'r') as f:
                total_node_list = [line.strip() for line in f.readlines()]
        launch_multiprocess(launchpad, fworker, args.loglvl, args.nlaunches, args.num_jobs,
                            args.sleep, total_node_list, args.ppn, timeout=args.timeout,
                            exclude_current_node=args.exclude_current_node,
                            local_redirect=args.local_redirect)
    else:
        launch_rocket(launchpad, fworker, args.fw_id, args.loglvl, pdb_on_exception=args.pdb)
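
rlaunch installs a SIGINT handler before resolving its configuration so that Ctrl-C exits cleanly instead of surfacing a KeyboardInterrupt mid-launch. A minimal sketch of that pattern follows, with an illustrative handler body (the real FireWorks handle_interrupt may do more):

import signal
import sys


def handle_interrupt(signum, frame):
    # Illustrative handler: exit with a short message instead of a traceback.
    sys.exit('Interrupted by signal %d; exiting.' % signum)


def main():
    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C
    # ... parse arguments and launch rockets, as rlaunch() does above ...


if __name__ == '__main__':
    main()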