Example no. 1
0

# fmt: off

# Argument tree for the "template" command group (alias "tpl"):
# manage configuration templates via list / describe / set / remove
# sub-commands. Consumed by the surrounding CLI framework (Cmd/Arg).
args_description = [
    Cmd("template tpl", None, "manage config templates", [
        # "list" (alias "ls") is the default sub-command of the group.
        Cmd("list ls",
            list_template,
            "list config templates", [
                Arg("-d",
                    "--details",
                    action="store_true",
                    help="show the configs of the templates"),
            ],
            is_default=True),
        # show a single template by name
        Cmd("describe", describe_template, "describe config template", [
            Arg("template_name", type=str, help="template name"),
        ]),
        # create/overwrite a template from a YAML file
        Cmd("set", set_template, "set config template", [
            Arg("template_name", help="template name"),
            Arg("template_file",
                type=FileType("r"),
                help="config template file (.yaml)")
        ]),
        # delete a template by name
        Cmd("remove rm", remove_templates, "remove config template",
            [Arg("template_name", help="template name")]),
    ])
]  # type: List[Any]

# fmt: on
Example no. 2
0
def safe_rename(src, dst):
    """Rename *src* to *dst* without ever overwriting an existing file.

    If *dst* already exists, a zero-padded numeric suffix is appended
    (``dst_001``, ``dst_002``, ...) until a free name is found.
    """
    target = dst
    suffix = 0
    # Probe candidate names until one is unused; the plain *dst* is
    # tried first, then the suffixed variants in order.
    while path.exists(target):
        suffix += 1
        target = dst + "_" + str(suffix).zfill(3)
    rename(src, target)


# Tiny CLI: a recording list file and a base name for the sox output.
parser = ArgumentParser(description='process recorded mandarin sounds.')

parser.add_argument('-i', '--input-file', type=FileType('r'))
parser.add_argument('-n', '--name', type=str)

# Parse argv once and reuse the namespace (the original called
# parse_args() twice, re-scanning sys.argv needlessly).
args = parser.parse_args()
input_file = args.input_file
name = args.name

# Collect the second whitespace-separated token of every non-empty line.
# NOTE(review): `names` is built but never used below — presumably a
# later processing step was planned; confirm before removing it.
lst = input_file.read().splitlines()
names = []
for i in lst:
    if len(i) > 0:
        names.append(i.split(' ')[1])

# Split the recording on silences into numbered mp3 files.
# NOTE(review): `name` is interpolated into a shell command
# (shell=True) — safe only for trusted input; confirm callers.
call("sox recording_" + name + ".mp3 " + name + ".mp3 "
     "silence 1 .25 0.1% 1 0.1 0.1% : newfile : restart",
     shell=True)
def main() -> int:
    """Entry point: parse CLI/environment configuration, build an
    OpenstackExporter (real or dummy-data backed) and serve Prometheus
    metrics until interrupted.

    Returns 0 on Ctrl-C, 1 on a fatal setup or runtime error; otherwise
    the serve loop never returns.
    """
    parser = ArgumentParser(
        epilog=f"{__license__} @ {__author__}",
        formatter_class=ArgumentDefaultsHelpFormatter,
        description=__doc__,
    )
    # --- data-source / dummy-mode options ---------------------------------
    parser.add_argument(
        "-d",
        "--dummy-data",
        type=FileType(),
        help=
        f"""Use dummy values instead of connecting to an openstack instance. Usage
        values are calculated based on the configured existence. Toml files can be updated on the fly as they are read 
        every time a dummy-cloud function is called (functions of nested classes excluded).
        Take a look at the example file for an explanation {default_dummy_file}. Can also be provided via
        environment variable ${dummy_file_env_var}""",
    )
    parser.add_argument(
        "-w",
        "--dummy-weights",
        type=FileType(),
        help=
        f"""Use dummy weight endpoint instead of connecting to the api. Take a look at the
        example file for an explanation {default_dummy_weights_file}. Can also be provided via
        environment variable ${dummy_weights_file_env_var}""",
    )
    # --- project selection ------------------------------------------------
    parser.add_argument(
        "--domain",
        default=[
            domain.strip()
            for domain in getenv(project_domain_env_var, ",".join(
                default_project_domains)).split(",") if domain
        ],
        type=str,
        nargs="*",
        help=
        f"""Only export usages of projects belonging to one of the given domains.
        Separate them via comma if passing via environment variable
        ${project_domain_env_var}. If no domains are specified all readable projects
        are exported.""",
    )
    parser.add_argument(
        "--domain-id",
        default=getenv(project_domain_id_env_var, "").strip(),
        help=
        f"""Only export usages of projects belonging to the domain identified by
        the given ID. Takes precedence over any specified domain and default values. Can
        also be set via ${project_domain_id_env_var}""",
    )
    parser.add_argument(
        "--simple-vm-id",
        default=getenv(simple_vm_project_id_env_var, "").strip(),
        type=str,
        help=
        f"""The ID of the Openstack project, that hosts the SimpleVm projects. 
        Can also be set vis ${simple_vm_project_id_env_var}""",
    )
    parser.add_argument(
        "--simple-vm-tag",
        default=getenv(simple_vm_project_name_tag_env_var,
                       "project_name").strip(),
        type=str,
        help=
        f"""The metadata of the Openstack project, that hosts the SimpleVm projects. It is used
        to differentiate the simple vm projects, default: project_name
        Can also be set vis ${simple_vm_project_name_tag_env_var}""",
    )
    # --- credit-weight refresh options ------------------------------------
    parser.add_argument(
        "--weight-update-frequency",
        type=int,
        default=int(getenv(weights_update_frequency_env_var, 10)),
        help=
        f"""The frequency of checking if there is a weight update. Is  a multiple of the update interval length
        . Defaults to the value of environment variable ${weights_update_frequency_env_var} or 10""",
    )
    parser.add_argument(
        "--weight-update-endpoint",
        type=str,
        default=getenv(weights_update_endpoint_env_var, "").strip(),
        help=f"""The endpoint url where the current weights can be updated
        . Defaults to the value of environment variable ${weights_update_endpoint_env_var} or will be left blank""",
    )
    parser.add_argument(
        "--start-date-endpoint",
        type=nullable_string,
        default=getenv(start_date_endpoint_env_var, "").strip(),
        help=f"""The endpoint url where the start date can be requested.
        If defined, requested date takes precedence over all other start date arguments.
        Defaults to the value of environment variable ${start_date_endpoint_env_var} or will be left blank""",
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=getenv(start_date_env_var, datetime.today()),
        help=
        f"""Beginning time of stats (YYYY-MM-DD). If set the value of environment
        variable ${start_date_env_var} is used. Uses maya for parsing.""",
    )
    # NOTE(review): the help text below says "or 300" but the code default
    # is 30 — confirm which value is intended.
    parser.add_argument(
        "-i",
        "--update-interval",
        type=int,
        default=int(getenv(update_interval_env_var, 30)),
        help=
        f"""Time to sleep between intervals, in case the calls cause to much load on
        your openstack instance. Defaults to the value of environment variable
        ${update_interval_env_var} or 300 (in seconds)""",
    )
    parser.add_argument("-p",
                        "--port",
                        type=int,
                        default=8080,
                        help="Port to provide metrics on")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        default=convert_verbose(),
                        help="Activate logging debug level")
    args = parser.parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode activated.")
    # Optionally fetch the stats start date from an HTTP endpoint; if set it
    # overrides the --start argument.
    if args.start_date_endpoint:
        try:
            start_date_response = requests.get(args.start_date_endpoint)
            start_date_response = start_date_response.json()
            args.start = maya.when(
                start_date_response[0]["start_date"]).datetime()
        except Exception as e:
            logger.exception(
                f"Exception when getting start date from endpoint. Exception message: {e}. "
                f"Traceback following:\n")
            return 1
    # Build the exporter: dummy file from CLI, dummy file from environment
    # variable, or a real Openstack connection (in that priority order).
    if args.dummy_data:
        logger.info("Using dummy export with data from %s",
                    args.dummy_data.name)
        try:
            exporter = OpenstackExporter(domains=args.domain,
                                         stats_start=args.start,
                                         domain_id=args.domain_id,
                                         simple_vm_project=args.simple_vm_id,
                                         simple_vm_tag=args.simple_vm_tag,
                                         dummy_file=args.dummy_data)
        # NOTE(review): the ValueError is swallowed without any log
        # message — confirm whether that is intended.
        except ValueError as e:
            return 1
    elif getenv(dummy_file_env_var):
        logger.info("Using dummy export with data from %s",
                    getenv(dummy_file_env_var))
        # if the default dummy data have been used we need to open them, argparse
        # hasn't done this for us since the default value has not been a string
        try:
            with open(getenv(dummy_file_env_var)) as file:
                exporter = OpenstackExporter(
                    domains=args.domain,
                    stats_start=args.start,
                    domain_id=args.domain_id,
                    simple_vm_project=args.simple_vm_id,
                    simple_vm_tag=args.simple_vm_tag,
                    dummy_file=file)
        except ValueError as e:
            return 1
    else:
        try:
            logger.info("Using regular openstack exporter")
            exporter = OpenstackExporter(domains=args.domain,
                                         stats_start=args.start,
                                         domain_id=args.domain_id,
                                         simple_vm_project=args.simple_vm_id,
                                         simple_vm_tag=args.simple_vm_tag)
        except ValueError as e:
            return 1
    logger.info(f"Beginning to serve metrics on port {args.port}")
    prometheus_client.start_http_server(args.port)
    # laps counts update intervals since the last weight refresh; starting at
    # the threshold forces a refresh on the first loop iteration.
    laps = args.weight_update_frequency
    if args.dummy_weights or getenv(dummy_weights_file_env_var):
        args.weight_update_endpoint = "dummy-endpoint"
    # Serve loop: refresh credit weights every weight-update-frequency
    # intervals (if an endpoint is configured) and update the exporter's
    # metrics once per update-interval.
    while True:
        if args.weight_update_endpoint != "":
            if laps >= args.weight_update_frequency:
                try:
                    if args.dummy_weights:
                        weight_response = get_dummy_weights(args.dummy_weights)
                    elif getenv(dummy_weights_file_env_var):
                        with open(getenv(dummy_weights_file_env_var)) as file:
                            weight_response = get_dummy_weights(file)
                    else:
                        weight_response = requests.get(
                            args.weight_update_endpoint)
                    # Reshape the endpoint payload into
                    # {timestamp: {'memory_mb': {value: weight},
                    #              'vcpus':    {value: weight}}}.
                    current_weights = {
                        x['resource_set_timestamp']: {
                            'memory_mb':
                            {y['value']: y['weight']
                             for y in x['memory_mb']},
                            'vcpus':
                            {y['value']: y['weight']
                             for y in x['vcpus']}
                        }
                        for x in weight_response.json()
                    }
                    logger.debug("Updated credits weights, new weights: " +
                                 str(current_weights))
                    exporter.update_weights(current_weights)
                except Exception as e:
                    logger.exception(
                        f"Received exception {e} while trying to update the credit weights, check if credit endpoint {args.weight_update_endpoint}"
                        f" is accessible or contact the denbi team to check if the weights are set correctly. Traceback following."
                    )
                finally:
                    # reset the counter even when the refresh failed, so a
                    # broken endpoint is retried on the normal schedule
                    laps = 0
            else:
                laps += 1
        try:
            sleep(args.update_interval)
            exporter.update()
        except KeyboardInterrupt:
            logger.info("Received Ctrl-c, exiting.")
            return 0
        except Exception as e:
            logger.exception(
                f"Received unexpected exception {e}. Traceback following.")
            return 1
Example no. 4
0
def main():
    """Validate Datacite / OpenAire records against an XSD schema.

    Either validates a single ``--xmlfile``, or iterates every record
    served at ``--baseurl``, printing one CSV line per validation failure
    and, with ``--lang``, a per-language record count at the end.
    """
    language_count = {}
    descr = 'Validate Datacite / OpenAire Schemas.'
    parser = ArgumentParser(description=descr)
    parser.add_argument('--schema', type=str, help='XSD file', required=True)
    parser.add_argument('--xmlfile', type=str, help='input file')
    parser.add_argument('--baseurl',
                        type=str,
                        help='url',
                        default="http://127.0.0.1:5000")
    parser.add_argument('--token', type=FileType('r'))
    parser.add_argument('--prefix',
                        type=str,
                        help='MetaData prefix',
                        choices=[
                            'oai_openaire', 'oai_datacite', 'oai_datacite3',
                            'oai_datacite4', 'marcxml'
                        ],
                        default='oai_openaire')
    parser.add_argument('--verbose', action="store_true")
    parser.add_argument('--print_xml',
                        action="store_true",
                        help="print xml documents")
    parser.add_argument('--lang',
                        help="aggregate languages",
                        action="store_true")
    parser.add_argument('--validate',
                        help="validate oai_pmh",
                        action="store_true")
    args = parser.parse_args()
    # NOTE(review): TOKEN is read but never used below — confirm whether it
    # was meant to authenticate the requests against baseurl.
    TOKEN = "".join(args.token.readlines())
    schema = load_xsd_file(args.schema)
    prefix = args.prefix
    print_xml = args.print_xml
    print("i,community,name,errtype,element")
    # Record counter. Initialised here so the summary print at the end does
    # not raise NameError on the --xmlfile path (the original assigned it
    # only inside the else branch).
    i = 0
    if args.xmlfile is not None:
        # single-file mode: validate one document and emit the result
        doc = load_xml_file(args.xmlfile)
        result = validate_xml(schema, doc, name=args.xmlfile)
        print(json.dumps(result))
    else:
        # repository mode: walk all records at baseurl
        baseurl = args.baseurl
        for record in iterate_records(baseurl):
            i += 1
            community = record.get('metadata', {}).get('community', '')
            language = record.get('metadata', {}).get('language', '')
            language_count[language] = language_count.get(language, 0) + 1
            if args.validate:
                try:
                    oai_pmh = get_oai_pmh(baseurl, record['id'], prefix)
                    resource = get_data_cite_resource(oai_pmh)
                    validation = validate_xml(schema,
                                              resource,
                                              community=community,
                                              name=record['id'])
                    if print_xml:
                        print_validation_result(resource, validation)
                    if not validation['errtype']:
                        validation['errtype'] = ''
                except Exception as e:
                    validation = {
                        'community': community,
                        'name': record['id'],
                        'errtype': str(e).replace(',', ''),
                        'element': ''
                    }
                    # NOTE(review): this CSV row is only emitted when the
                    # validation *raised*; successful validations are never
                    # printed — confirm this is intended.
                    print("{i},{community},{name},{errtype},{element}".format(
                        i=i, **validation))

    if args.lang:
        import pprint
        pprint.pprint(language_count, indent=4)
    print("number of records: {0}".format(i))
Example no. 5
0
    def _add_smac_options(self) -> None:
        """Add SMAC Options.

        Builds ``self.smac_parser``, registers every SMAC option in a
        "SMAC Options" argument group, attaches the parser via
        ``self.parser.add_parser`` and caches its action/translation
        tables for later lookups.
        """
        self.smac_parser = SMACArgumentParser(
            formatter_class=ConfigurableHelpFormatter, add_help=False)
        smac_opts = self.smac_parser.add_argument_group("SMAC Options")
        smac_opts.add_argument(
            "--abort-on-first-run-crash",
            "--abort_on_first_run_crash",
            dest='abort_on_first_run_crash',
            default=True,
            type=truthy,
            help="If true, *SMAC* will abort if the first run of "
            "the target algorithm crashes.")
        smac_opts.add_argument(
            "--limit-resources",
            "--limit_resources",
            dest='limit_resources',
            default=True,
            type=truthy,
            help=
            "If true, *SMAC* will use pynisher to limit time and memory for "
            "the target algorithm. Allows SMAC to use all resources available. "
            "Applicable only to func TAEs. Set to 'True' by default. (Use with caution!)"
        )

        smac_opts.add_argument(
            "--minr",
            "--minR",
            dest='minR',
            default=1,
            type=int,
            help="[dev] Minimum number of calls per configuration.")
        smac_opts.add_argument(
            "--maxr",
            "--maxR",
            dest='maxR',
            default=2000,
            type=int,
            help="[dev] Maximum number of calls per configuration.")
        # Default output directory is timestamped so repeated runs do not
        # overwrite each other.
        self.output_dir_arg = \
            smac_opts.add_argument("--output-dir", "--output_dir", dest='output_dir',
                                   type=str, action=ProcessOutputDirAction,
                                   default="smac3-output_%s" % (
                                       datetime.datetime.fromtimestamp(
                                           time.time()).strftime(
                                           '%Y-%m-%d_%H:%M:%S_%f')),
                                   help="Specifies the output-directory for all emerging "
                                        "files, such as logging and results.")
        smac_opts.add_argument(
            "--input-psmac-dirs",
            "--input_psmac_dirs",
            dest='input_psmac_dirs',
            default=None,
            help="For parallel SMAC, multiple output-directories "
            "are used.")  # TODO: type (list of strings? --> str, nargs=*)
        smac_opts.add_argument("--shared-model",
                               "--shared_model",
                               dest='shared_model',
                               default=False,
                               type=truthy,
                               help="Whether to run SMAC in parallel mode.")
        smac_opts.add_argument(
            "--random-configuration-chooser",
            "--random_configuration_chooser",
            dest="random_configuration_chooser",
            default=None,
            type=FileType('r'),
            action=ParseRandomConfigurationChooserAction,
            help="[dev] path to a python module containing a class"
            "`RandomConfigurationChooserImpl` implementing"
            "the interface of `RandomConfigurationChooser`")
        smac_opts.add_argument(
            "--hydra-iterations",
            "--hydra_iterations",
            dest="hydra_iterations",
            default=3,
            type=int,
            help=
            "[dev] number of hydra iterations. Only active if mode is set to Hydra"
        )
        smac_opts.add_argument(
            "--use-ta-time",
            "--use_ta_time",
            dest="use_ta_time",
            default=False,
            type=truthy,
            help="[dev] Instead of measuring SMAC's wallclock time, "
            "only consider time reported by the target algorithm (ta).")

        # Hyperparameters
        smac_opts.add_argument(
            "--always-race-default",
            "--always_race_default",
            dest='always_race_default',
            default=False,
            type=truthy,
            help="[dev] Race new incumbents always against default "
            "configuration.")
        smac_opts.add_argument("--intensification-percentage",
                               "--intensification_percentage",
                               dest='intensification_percentage',
                               default=0.5,
                               type=float,
                               help="[dev] The fraction of time to be used on "
                               "intensification (versus choice of next "
                               "Configurations).")
        smac_opts.add_argument(
            "--transform_y",
            "--transform-y",
            dest='transform_y',
            choices=["NONE", "LOG", "LOGS", "INVS"],
            default="NONE",
            help="[dev] Transform all observed cost values"
            " via log-transformations or inverse scaling."
            " The subfix \"s\" indicates that SMAC scales the"
            " y-values accordingly to apply the transformation.")

        # RF Hyperparameters
        smac_opts.add_argument(
            "--rf_num_trees",
            "--rf-num-trees",
            dest='rf_num_trees',
            default=N_TREES,
            type=int,
            help="[dev] Number of trees in the random forest (> 1).")
        # Fixed: type=bool is a classic argparse pitfall — bool("False") is
        # True, so the flag could never be switched off from the command
        # line. Use the same `truthy` converter as the other boolean flags.
        smac_opts.add_argument("--rf_do_bootstrapping",
                               "--rf-do-bootstrapping",
                               dest='rf_do_bootstrapping',
                               default=True,
                               type=truthy,
                               help="[dev] Use bootstraping in random forest.")
        smac_opts.add_argument(
            "--rf_ratio_features",
            "--rf-ratio-features",
            dest='rf_ratio_features',
            default=5. / 6.,
            type=float,
            help="[dev] Ratio of sampled features in each split ([0.,1.]).")
        smac_opts.add_argument(
            "--rf_min_samples_split",
            "--rf-min-samples-split",
            dest='rf_min_samples_split',
            default=3,
            type=int,
            help="[dev] Minimum number of samples"
            " to split for building a tree in the random forest.")
        smac_opts.add_argument(
            "--rf_min_samples_leaf",
            "--rf-min-samples-leaf",
            dest='rf_min_samples_leaf',
            default=3,
            type=int,
            help="[dev] Minimum required number of"
            " samples in each leaf of a tree in the random forest.")
        smac_opts.add_argument(
            "--rf_max_depth",
            "--rf-max-depth",
            dest='rf_max_depth',
            default=20,
            type=int,
            help="[dev] Maximum depth of each tree in the random forest.")
        # AcquisitionOptimizer SLS
        smac_opts.add_argument(
            "--sls_n_steps_plateau_walk",
            "--sls-n-steps-plateau-walk",
            dest='sls_n_steps_plateau_walk',
            default=10,
            type=int,
            help="[dev] Maximum number of steps on plateaus during "
            "the optimization of the acquisition function.")
        smac_opts.add_argument(
            "--sls_max_steps",
            "--sls-max-steps",
            dest='sls_max_steps',
            default=None,
            type=int,
            help="[dev] Maximum number of local search steps in one iteration"
            " during the optimization of the acquisition function.")
        smac_opts.add_argument(
            "--acq_opt_challengers",
            "--acq-opt-challengers",
            dest='acq_opt_challengers',
            default=5000,
            type=int,
            help="[dev] Number of challengers returned by acquisition function"
            " optimization. Also influences the number of randomly sampled"
            " configurations to optimized the acquisition function")

        # Intensification
        # The misspelled "--intens-adaptive-capping-slackfactork" alias is
        # kept for backward compatibility; the correctly spelled dashed
        # variant is added alongside it.
        smac_opts.add_argument(
            "--intens_adaptive_capping_slackfactor",
            "--intens-adaptive-capping-slackfactork",
            "--intens-adaptive-capping-slackfactor",
            dest='intens_adaptive_capping_slackfactor',
            default=1.2,
            type=float,
            help=
            "[dev] Slack factor of adpative capping (factor * adpative cutoff)."
            " Only active if obj is runtime."
            " If set to very large number it practically deactivates adaptive capping."
        )
        smac_opts.add_argument(
            "--intens_min_chall",
            "--intens-min-chall",
            dest='intens_min_chall',
            default=2,
            type=int,
            help="[dev] Minimal number of challengers to be"
            " considered in each intensification run (> 1)."
            " Set to 1 and in combination with very small intensification-percentage."
            " it will deactivate randomly sampled configurations"
            " (and hence, extrapolation of random forest will be an issue.)")
        smac_opts.add_argument(
            "--rand_prob",
            "--rand-prob",
            dest='rand_prob',
            default=0.5,
            type=float,
            help="[dev] probablity to run a random configuration"
            " instead of configuration optimized on the acquisition function")
        # Register the sub-parser and cache its action metadata for lookups.
        self.parser.add_parser(self.smac_parser)
        self.smac_cmd_actions, self.smac_cmd_translations = CMDReader._extract_action_info(
            self.smac_parser._actions)
Example no. 6
0
def main():
    """Split recordings into fixed-size extraction jobs for ``mp_extract``.

    Input files come from --files, a --jobs list file, a --matfile or an
    --i16file; each file is cut into ``blocksize``-record chunks that are
    processed by ``nWorkers`` parallel workers.
    """
    # standard options
    nWorkers = 8  # was 8
    blocksize = 10000

    parser = ArgumentParser(prog='css-extract',
                            description='spike extraction from .ncs files',
                            epilog='Johannes Niediek ([email protected])')
    parser.add_argument('--files',
                        nargs='+',
                        help='.ncs files to be extracted')
    parser.add_argument('--start', type=int, help='start index for extraction')
    parser.add_argument('--stop', type=int, help='stop index for extraction')
    parser.add_argument('--jobs',
                        nargs=1,
                        help='job file contains one filename per row')
    parser.add_argument('--matfile',
                        nargs=1,
                        help='extract data from a matlab file')
    parser.add_argument('--i16file',
                        nargs=1,
                        help='extract data from a flat binary file')
    parser.add_argument(
        '--i16filesr',
        nargs=1,
        help='sampling rate for flat binary file (requires i16file)')
    parser.add_argument('--destination',
                        nargs=1,
                        help='folder where spikes should be saved')
    parser.add_argument('--refscheme',
                        nargs=1,
                        type=FileType(mode='r'),
                        help='scheme for re-referencing')
    args = parser.parse_args()

    if (args.files is None) and (args.matfile is None) and\
            (args.jobs is None) and (args.i16file is None):
        parser.print_help()
        print('Supply either files or jobs or matfile or binary file.')
        return

    destination = args.destination[0] if args.destination is not None else ''

    # special case for a matlab file: one job covers the whole file
    if args.matfile is not None:
        jname = os.path.splitext(os.path.basename(args.matfile[0]))[0]
        jobs = [{
            'name': jname,
            'filename': args.matfile[0],
            'is_matfile': True,
            'count': 0,
            'destination': destination
        }]
        mp_extract(jobs, 1)
        return

    # generate the list of input files
    if args.jobs:  # a text file listing one input file per row
        with open(args.jobs[0], 'r') as f:
            files = [a.strip() for a in f.readlines()]
        print('Read jobs from ' + args.jobs[0])
    elif args.i16file:
        files = args.i16file
    else:
        files = args.files

    print(files)

    if not files or files[0] is None:
        print('Specify files!')
        return

    # construct the jobs
    jobs = []

    # optional re-referencing scheme: maps filename -> reference channel
    references = None
    if args.refscheme:
        import csv
        reader = csv.reader(args.refscheme[0], delimiter=';')
        references = {line[0]: line[1] for line in reader}

    for fname in files:
        start = args.start if args.start else 0
        # cheat: assume a fixed length for now; later look up the real
        # length here if the input is not an ncs file
        nrecs = 1000000
        stop = min(args.stop, nrecs) if args.stop else nrecs

        # don't schedule a final partial block that would read past the
        # end of the file
        if stop % blocksize > blocksize / 2:
            laststart = stop - blocksize
        else:
            laststart = stop

        i16filesr = args.i16filesr if args.i16filesr else 20000

        # Block boundaries. list() is required: in Python 3 a `range`
        # slice is itself a range and cannot be concatenated with a
        # list, so the original `starts[1:] + [stop]` raised TypeError.
        starts = list(range(start, laststart, blocksize))
        stops = starts[1:] + [stop]
        name = os.path.splitext(os.path.basename(fname))[0]
        if references is not None:
            reference = references[fname]
            print('{} (re-referenced to {})'.format(fname, reference))
        else:
            reference = None
            print(name)

        for count, (blk_start, blk_stop) in enumerate(zip(starts, stops)):
            jobs.append({
                'name': name,
                'filename': fname,
                'start': blk_start,
                'stop': blk_stop,
                'count': count,
                'destination': destination,
                'reference': reference,
                'i16filesr': i16filesr
            })

    mp_extract(jobs, nWorkers)  # process all blocks with parallel workers
Example no. 7
0
def main():
    """Main routine.

    Command-line tool for inspecting and updating the EEPROM of an FTDI
    device: parses arguments, configures logging, optionally installs a
    virtual USB backend, then applies the requested EEPROM operations.
    """
    debug = False
    try:
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('device',
                               nargs='?',
                               default='ftdi:///?',
                               help='serial port device name')
        argparser.add_argument('-x',
                               '--hexdump',
                               action='store_true',
                               help='dump EEPROM content as ASCII')
        argparser.add_argument('-X',
                               '--hexblock',
                               type=int,
                               help='dump EEPROM as indented hexa blocks')
        argparser.add_argument('-i',
                               '--input',
                               type=FileType('rt'),
                               help='input ini file to load EEPROM content')
        argparser.add_argument('-l',
                               '--load',
                               default='all',
                               choices=('all', 'raw', 'values'),
                               help='section(s) to load from input file')
        argparser.add_argument('-o',
                               '--output',
                               type=FileType('wt'),
                               help='output ini file to save EEPROM content')
        argparser.add_argument('-s',
                               '--serial-number',
                               help='set serial number')
        argparser.add_argument('-m',
                               '--manufacturer',
                               help='set manufacturer name')
        argparser.add_argument('-p', '--product', help='set product name')
        argparser.add_argument('-c',
                               '--config',
                               action='append',
                               help='change/configure a property '
                               'as key=value pair')
        argparser.add_argument('-e',
                               '--erase',
                               action='store_true',
                               help='erase the whole EEPROM content')
        argparser.add_argument('-u',
                               '--update',
                               action='store_true',
                               help='perform actual update, use w/ care')
        argparser.add_argument('-P',
                               '--vidpid',
                               action='append',
                               help='specify a custom VID:PID device ID, '
                               'may be repeated')
        argparser.add_argument('-V',
                               '--virtual',
                               type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # Each -v raises verbosity by one 10-unit logging step, clamped
        # to the [DEBUG, ERROR] range.
        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        eeprom = FtdiEeprom()
        eeprom.open(args.device)
        # Apply mutations in a fixed order: erase first, then file load,
        # then individual field overrides.
        if args.erase:
            eeprom.erase()
        if args.input:
            eeprom.load_config(args.input, args.load)
        if args.serial_number:
            eeprom.set_serial_number(args.serial_number)
        if args.manufacturer:
            eeprom.set_manufacturer_name(args.manufacturer)
        if args.product:
            eeprom.set_product_name(args.product)
        for conf in args.config or []:
            # '-c ?' lists the supported property names and exits.
            if conf == '?':
                helpstr = ', '.join(sorted(eeprom.properties))
                print(
                    fill(helpstr, initial_indent='  ', subsequent_indent='  '))
                exit(1)
            # Accept either 'name:value' or 'name=value' pairs.
            for sep in ':=':
                if sep in conf:
                    name, value = conf.split(sep, 1)
                    if not value:
                        argparser.error('Configuration %s without value' %
                                        conf)
                    # set_property may emit a help message instead of
                    # applying the value; if so, show it and exit.
                    helpio = StringIO()
                    eeprom.set_property(name, value, helpio)
                    helpstr = helpio.getvalue()
                    if helpstr:
                        print(
                            fill(helpstr,
                                 initial_indent='  ',
                                 subsequent_indent='  '))
                        exit(1)
                    break
            else:
                argparser.error('Missing name:value separator in %s' % conf)
        if args.hexdump:
            print(hexdump(eeprom.data))
        if args.hexblock is not None:
            # Dump 16 bytes per line, indented by the requested amount.
            indent = ' ' * args.hexblock
            for pos in range(0, len(eeprom.data), 16):
                hexa = ' '.join(
                    ['%02x' % x for x in eeprom.data[pos:pos + 16]])
                print(indent, hexa, sep='')
        if args.update:
            # Only write to the device when --update is given; reset the
            # device if the commit reports a change was written.
            if eeprom.commit(False):
                eeprom.reset_device()
        if args.verbose > 0:
            eeprom.dump_config()
        if args.output:
            eeprom.save_config(args.output)

    except (ImportError, IOError, NotImplementedError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        # CTRL-C: exit quietly with a distinct status code.
        exit(2)
#!/usr/bin/env python3

import re
import json
from argparse import ArgumentParser, FileType
from jinja2 import Environment

# CLI: a jinja2 template for index.md plus the docs.json data file.
p = ArgumentParser()
p.add_argument('template',
               type=FileType(),
               help='path to index.md jinja template')
p.add_argument('docsjson', type=FileType(), help='path to docs.json')
args = p.parse_args()

env = Environment()
# docs maps a symbol name to its doc fields (Signature/Returns/Example,
# as read by func() below).
docs = json.load(args.docsjson)
tmpl = env.from_string(args.template.read())


def highlight_symbols(value):
    """Wrap every literal '#t' occurrence in backticks for markdown."""
    return value.replace('#t', '`#t`')


def func(name):
    """Render the documentation block for the symbol *name*."""
    data = docs[name]
    # NOTE(review): `tmpl_func` is not defined anywhere in this file —
    # it looks like either a missing per-function template or a typo
    # for `tmpl`; confirm before relying on this helper.
    return tmpl_func.render(signature=data['Signature'],
                            returns=data['Returns'],
                            example=data['Example'])


# Make func() callable from inside rendered jinja templates.
env.globals['func'] = func
Ejemplo n.º 9
0
def main(argv=None):
    '''Command line options.

    Parses the command line, configures logging, starts the tweet
    processor HTTP server thread and the rule engine, then waits for
    the engine to finish.  Returns 0 on success (or interruption),
    2 on unexpected errors.
    '''

    global DEBUG

    if argv is None:
        argv = sys.argv
    else:
        # NOTE(review): appends the caller-supplied args to sys.argv
        # instead of replacing it — confirm this is intended.
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    # Short description is the second line of the main module docstring.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

  Created by user_name on %s.
  Copyright 2013 organization_name. All rights reserved.

  Licensed under the Apache License 2.0
  http://www.apache.org/licenses/LICENSE-2.0

  Distributed on an "AS IS" basis without warranties
  or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    verbose = False
    DEBUG = False
    with_offline_tweets = False

    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("-v",
                            "--verbose",
                            action="store_true",
                            help="set verbosity level [default: %(default)s]")
        parser.add_argument("-d",
                            "--debug",
                            action="store_true",
                            help="produce debug output [default: %(default)s]")
        parser.add_argument(
            "-o",
            "--with-offline-tweets",
            action="store_true",
            help="use offline tweet database [default: %(default)s]")
        parser.add_argument(
            "-r",
            "--run",
            action="store_true",
            help="run without waiting for browser [default: %(default)s]")
        parser.add_argument("-V",
                            "--version",
                            action="version",
                            version=program_version_message)
        parser.add_argument("-p",
                            "--port",
                            type=int,
                            default=7737,
                            metavar="N",
                            help="set port to listen on "
                            "[default: %(default)s]")
        parser.add_argument("-s",
                            "--speed",
                            type=int,
                            default=100000,
                            metavar="N",
                            help="set rule engine speed "
                            "[default: %(default)s]")
        parser.add_argument(
            "-b",
            "--begin",
            type=str,
            default='',
            metavar="yyyy-MM-dd[:HH[:mm[:ss]]]",
            help="set begin date and time [default: first tweet]")
        parser.add_argument("-e",
                            "--end",
                            type=str,
                            default='',
                            metavar="yyyy-MM-dd[:HH[:mm[:ss]]]",
                            help="set end date [default: last tweet]")
        parser.add_argument("infile",
                            nargs="?",
                            type=FileType("r"),
                            default=sys.stdin,
                            help="file containing event "
                            "messages [default: %(default)s]")

        # Process arguments
        args = parser.parse_args()

        verbose = args.verbose
        DEBUG = args.debug  # pylint: disable=W0603
        with_offline_tweets = args.with_offline_tweets
        port = args.port
        speed = args.speed
        # Begin/end strings are converted to float timestamps.
        begin_time = str2timefloat.ds2tf(args.begin)
        end_time = str2timefloat.ds2tf(args.end)
        infile = args.infile

        if verbose > 0:
            logging.basicConfig(level=logging.INFO)

        if DEBUG > 0:
            logging.basicConfig(level=logging.DEBUG)

            # if with_offline_tweets > 0:
            # NOTE(review): the offline-database import only happens in
            # debug mode; confirm whether it should instead depend on
            # the --with-offline-tweets flag, as the dead code suggests.
            import dboffline as dbconnect
            logging.info("Using offline tweet database")
        # else:
        # import dbconnect
        # logging.info("Using tweets from online database")

        logging.info("noDUI.py: Verbosity level %s.", verbose)
        logging.info("noDUI.py: Running %s, output via port %s.", infile.name,
                     port)

        # Command line parameter processing done, now the real work starts.

        # 1. Connect to the database:
        # TODO: everything's currently hardcoded. Make this more flexible,
        # e.g. by reading settings.ini.
        # try:
        # dbconnect.connect_to_db('130.89.10.35', 'antwan', 'batatweets',
        # 'anton_tweets')
        # except Exception as ex:
        # logging.error("Cannot connect to database")
        # return 1

        # 2. Try to see if we can parse the input file:
        rengine.load_file_stream(infile)

        # 3. Start the server component of the OUI:
        # Tweetprocessor starts a HTTP server that runs forever,
        # so it needs its own thread:
        tweetprocessor_thread = threading.Thread(
            target=tweetprocessor.process_tweets,
            args=[port],
            name="tweetprocessor_thread")
        tweetprocessor_thread.start()
        logging.info("Server started.")

        # 4. Start the rule engine:
        # TODO: this needs an observer
        produce_function = tweetprocessor.get_produce_function()
        threadsync_event = tweetprocessor.EVENT
        # With --run the engine starts immediately instead of waiting
        # for the browser to signal readiness.
        if args.run:
            threadsync_event = None
        result = rengine.start_rule_engine(start_time=begin_time,
                                           stop_time=end_time,
                                           speed=speed,
                                           produce=produce_function,
                                           threadsync_event=threadsync_event)
        if result:
            logging.error(result)
        else:
            logging.info("Control handed to rule engine")

        # 5. Wait for rule engine to finish (this can be interrupted with CTRL-C):
        while (rengine.engine_thread is not None
               and rengine.engine_thread.is_alive()):
            logging.debug("Rule engine still active")
            rengine.engine_thread.join(0.5)

        # 6. Rule engine has finished. Shut down tweetprocessor:
        cleanup("rule engine stopped")
        return 0

    except KeyboardInterrupt:
        # This exception is raised upon receiving CTRL-C or SIGINT
        cleanup("CTRL-C or SIGINT received")
        return 0
    except Exception as ex:
        if DEBUG or TESTRUN:
            # NOTE(review): 'raise (ex)' re-raises the active exception;
            # a bare 'raise' is the conventional form.
            raise (ex)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(ex) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2
Ejemplo n.º 10
0
        key = string
        arguments = ()

    class_object = generators.get(key)
    if not class_object:
        raise argparse.ArgumentTypeError('Invalid generator: '
                                         '{}; choose one from: {}'.format(key,
                                                                          available_generators_keys_list()))

    try:
        return class_object(*arguments)
    except Exception as err:
        raise argparse.ArgumentTypeError('An error while evaluating '
                                         'the expression "{}": {}'.format(string,
                                                                          err))


if __name__ == '__main__':
    # Build the command-line interface and dispatch to main().
    cli = ArgumentParser()
    add_opt = cli.add_argument
    add_opt('specification', type=FileType('r'))
    add_opt('out_directory', type=str)
    add_opt('--update', '-u', action='store_true', default=False)
    # Repeatable generator option; each value is resolved to a generator
    # instance by generator_keys_to_modules.
    add_opt('-g', '--add-generator', dest='generators',
            action='append',
            type=generator_keys_to_modules,
            required=True)
    main(cli.parse_args())
Ejemplo n.º 11
0
def parse_cli():
    """Build the CLI and parse ``sys.argv``.

    Subcommands: info, decrypt, encrypt, recover, unpack and pack.
    When invoked without any arguments, the global help is printed
    before returning the (empty) parsed namespace.

    Returns:
        argparse.Namespace with ``subparser_name`` identifying the
        chosen subcommand (or None).
    """
    parser = ArgumentParser(
        description='** RouterOS Backup Tools by BigNerd95 **')
    subparser = parser.add_subparsers(dest='subparser_name')

    infoParser = subparser.add_parser('info', help='Backup info')
    infoParser.add_argument('-i',
                            '--input',
                            required=True,
                            metavar='INPUT_FILE',
                            type=FileType('rb'))

    decryptParser = subparser.add_parser('decrypt', help='Decrypt backup')
    decryptParser.add_argument('-i',
                               '--input',
                               required=True,
                               metavar='INPUT_FILE',
                               type=FileType('rb'))
    decryptParser.add_argument('-o',
                               '--output',
                               required=True,
                               metavar='OUTPUT_FILE',
                               type=FileType('wb'))
    decryptParser.add_argument('-p',
                               '--password',
                               required=True,
                               metavar='PASSWORD')

    encryptParser = subparser.add_parser('encrypt', help='Encrypt backup')
    encryptParser.add_argument('-i',
                               '--input',
                               required=True,
                               metavar='INPUT_FILE',
                               type=FileType('rb'))
    encryptParser.add_argument('-o',
                               '--output',
                               required=True,
                               metavar='OUTPUT_FILE',
                               type=FileType('wb'))
    encryptParser.add_argument('-p',
                               '--password',
                               required=True,
                               metavar='PASSWORD')

    recoverParser = subparser.add_parser('recover',
                                         help='Recover backup password')
    recoverParser.add_argument('-i',
                               '--input',
                               required=True,
                               metavar='INPUT_FILE',
                               type=FileType('rb'))
    recoverParser.add_argument('-w',
                               '--wordlist',
                               required=True,
                               metavar='WORDLIST')

    unpackParser = subparser.add_parser('unpack', help='Unpack backup')
    unpackParser.add_argument('-i',
                              '--input',
                              required=True,
                              metavar='INPUT_FILE',
                              type=FileType('rb'))
    unpackParser.add_argument('-d',
                              '--directory',
                              required=True,
                              metavar='UNPACK_DIRECTORY')

    # BUGFIX: the help text used to read 'Unpack backup' — a copy/paste
    # leftover from the unpack subcommand above.
    packParser = subparser.add_parser('pack', help='Pack backup')
    packParser.add_argument('-d',
                            '--directory',
                            required=True,
                            metavar='PACK_DIRECTORY')
    packParser.add_argument('-o',
                            '--output',
                            required=True,
                            metavar='OUTPUT_FILE',
                            type=FileType('wb'))

    # No arguments at all: show usage (parse_args still runs and returns
    # a namespace with subparser_name=None).
    if len(sys.argv) < 2:
        parser.print_help()

    return parser.parse_args()
Ejemplo n.º 12
0
    def configure(self, parser):
        """Attach the virtualization-testing options to the 'run' subcommand.

        Silently does nothing when the 'run' subcommand parser is absent.
        """
        run_parser = parser.subcommands.choices.get('run', None)
        if run_parser is None:
            return

        group = run_parser.add_argument_group(
            'virtualization testing arguments')
        add_option = group.add_argument
        add_option('--qemu-bin',
                   type=str,
                   default=defaults.qemu_bin,
                   help=('Path to a custom qemu binary to be tested. '
                         'Current path: %s' % defaults.qemu_bin))
        add_option('--qemu-dst-bin',
                   type=str,
                   default=defaults.qemu_dst,
                   help=('Path to a destination qemu binary to be tested. '
                         'Used as incoming qemu in migration tests. '
                         'Current path: %s' % defaults.qemu_dst))
        add_option('--qemu-img-bin',
                   type=str,
                   default=defaults.qemu_img_bin,
                   help=('Path to a custom qemu-img binary to be tested. '
                         'Current path: %s' % defaults.qemu_img_bin))
        add_option('--qemu-io-bin',
                   type=str,
                   default=defaults.qemu_io_bin,
                   help=('Path to a custom qemu-io binary to be tested. '
                         'Current path: %s' % defaults.qemu_io_bin))
        add_option('--guest-image-path',
                   type=str,
                   default=defaults.guest_image_path,
                   help=('Path to a guest image to be used in tests. '
                         'Current path: %s' % defaults.guest_image_path))
        add_option('--guest-user',
                   type=str,
                   default=defaults.guest_user,
                   help=('User that avocado should use for remote logins. '
                         'Current: %s' % defaults.guest_user))
        add_option('--guest-password',
                   type=str,
                   default=defaults.guest_password,
                   help=('Password for the user avocado should use for '
                         'remote logins. You may omit this if SSH keys are '
                         'setup in the guest. '
                         'Current: %s' % defaults.guest_password))
        add_option('--take-screendumps',
                   action='store_true',
                   default=defaults.screendump_thread_enable,
                   help=('Take regular QEMU screendumps (PPMs) from VMs '
                         'under test. '
                         'Current: %s' % defaults.screendump_thread_enable))
        # Video encoding is optional; only expose the flag when the
        # encoding backend is available.
        if VIDEO_ENCODING_SUPPORT:
            add_option('--record-videos',
                       action='store_true',
                       default=defaults.video_encoding_enable,
                       help=('Encode videos from VMs under test. '
                             'Implies --take-screendumps. '
                             'Current: %s' % defaults.video_encoding_enable))
        add_option('--qemu-template',
                   nargs='?',
                   type=FileType('r'),
                   help='Create qemu command line from a template')
Ejemplo n.º 13
0
def main(self, args: List[str] = None):
    """
    A deep learning program generator for the OpenCL programming language.

    The core operations of CLgen are:

       1. OpenCL files are collected from a model specification file.
       2. These files are preprocessed into an OpenCL kernel database.
       3. A training corpus is generated from the input files.
       4. A machine learning model is trained on the corpus of files.
       5. The trained model is sampled for new kernels.
       6. The samples are tested for compilability.

    This program automates the execution of all six stages of the pipeline.
    The pipeline can be interrupted and resumed at any time. Results are cached
    across runs. If installed with CUDA support, NVIDIA GPUs will be used to
    improve performance where possible.
    """
    # BUGFIX: the default was `args: List[str]=sys.argv[1:]`, which is
    # evaluated once at function-definition time, so any later mutation of
    # sys.argv was silently ignored. A None sentinel resolves argv lazily
    # on each call; explicit callers are unaffected.
    if args is None:
        args = sys.argv[1:]

    parser = ArgumentParser(
        prog="clgen",
        description=inspect.getdoc(self),
        epilog="""
For information about a specific command, run `clgen <command> --help`.

""" + __help_epilog__,
        formatter_class=RawDescriptionHelpFormatter)

    # TODO:
    # parser.add_argument(
    #     "-l", "--lang", metavar="<language>",
    #     help="programming language (default: OpenCL)")
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="increase output verbosity")
    parser.add_argument(
        "--version", action="store_true",
        help="show version information and exit")
    parser.add_argument(
        "--debug", action="store_true",
        help="in case of error, print debugging information")
    parser.add_argument(
        "--profile", action="store_true",
        help=("enable internal API profiling. When combined with --verbose, "
              "prints a complete profiling trace"))

    # Cache-path queries: each takes a model/sampler JSON spec file.
    parser.add_argument(
        "--corpus-dir", metavar="<corpus>",
        type=FileType("r"),
        help="print path to corpus cache")
    parser.add_argument(
        "--model-dir", metavar="<model>",
        type=FileType("r"),
        help="print path to model cache")
    parser.add_argument(
        "--sampler-dir", metavar=("<model>", "<sampler>"),
        type=FileType("r"), nargs=2,
        help="print path to sampler cache")

    subparser = parser.add_subparsers(title="available commands")

    subparsers = [
        _register_test_parser,
        _register_train_parser,
        _register_sample_parser,
        _register_db_parser,
        _register_fetch_parser,
        _register_ls_parser,
        _register_preprocess_parser,
        _register_features_parser,
        _register_atomize_parser,
        _register_cache_parser,
    ]

    for register_fn in subparsers:
        register_fn(subparser)

    args = parser.parse_args(args)

    # set log level
    log.init(args.verbose)

    # set debug option
    if args.debug:
        os.environ["DEBUG"] = "1"

    # set profile option
    if args.profile:
        prof.enable()

    # options which override the normal argument parsing process.
    if args.version:
        version = clgen.version()
        print(f"clgen {version} made with \033[1;31m♥\033[0;0m by "
              "Chris Cummins <*****@*****.**>.")
    elif args.corpus_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.corpus_dir.read()))
        print(model.corpus.cache.path)
    elif args.model_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.model_dir.read()))
        print(model.cache.path)
    elif args.sampler_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.sampler_dir[0].read()))
        sampler = clgen.Sampler.from_json(jsonutil.loads(args.sampler_dir[1].read()))
        print(sampler.cache(model).path)
    else:
        # strip the arguments from the top-level parser
        dispatch_func = args.dispatch_func
        opts = vars(args)
        del opts["version"]
        del opts["verbose"]
        del opts["debug"]
        del opts["profile"]
        del opts["corpus_dir"]
        del opts["model_dir"]
        del opts["sampler_dir"]
        del opts["dispatch_func"]

        run(dispatch_func, **opts)
Ejemplo n.º 14
0
def _add_args():
    """
    Build and parse the command-line interface.

    Registers global -t/--token and -u/--url options plus the sync, ls,
    mulsync, search and cmd_list subcommands, then parses sys.argv.
    """
    parser = ArgumentParser()
    parser.add_argument('-t', '--token', action='store',
                        help="Token auth string")
    parser.add_argument('-u', '--url', action='store',
                        help="Netbox main URL (with ending /)")

    subcommands = parser.add_subparsers()

    # sync: push data for a single device.
    sync_cmd = subcommands.add_parser('sync', help='Syncing a one device')
    sync_cmd.add_argument('hostname', action='store',
                          help='Host in netbox, which need to be syncing')
    sync_cmd.add_argument(
        'commandname', action='store',
        help='Command, which contain information about a device.')
    sync_cmd.add_argument('data', nargs='?', type=FileType('r'),
                          default=sys.stdin, help="Output of command")
    sync_cmd.set_defaults(func=_sync_device)

    # ls: list netbox API content.
    ls_cmd = subcommands.add_parser('ls', help='List netbox API content')
    ls_cmd.add_argument('field', nargs='*', help='List API fields')
    ls_cmd.set_defaults(func=_list_api)

    # mulsync: sync several devices in one go.
    # NOTE(review): unlike the other subcommands, mulsync registers no
    # func default — confirm whether a handler is missing upstream.
    mulsync_cmd = subcommands.add_parser('mulsync',
                                         help='Syncing a multiple device')
    mulsync_cmd.add_argument(
        'data', nargs='?', type=FileType('r'), default=sys.stdin,
        help="Json data, contains a host,command, data values")
    mulsync_cmd.add_argument('-f', '--filter', action='store',
                             help='Filter string')

    # search: look up a device by several identifiers.
    search_cmd = subcommands.add_parser(
        'search', help='Find a device by name, id, asset_tag, etc')
    search_cmd.add_argument('query', help="String, what you want to search")
    search_cmd.set_defaults(func=_search_api)

    # cmd_list: enumerate the commands the server understands.
    cmdlist_cmd = subcommands.add_parser(
        'cmd_list', help='Get a list of all available commands from server')
    cmdlist_cmd.set_defaults(func=_get_cmd_list)

    return parser.parse_args()
Ejemplo n.º 15
0
from json import load, JSONEncoder
from argparse import ArgumentParser, FileType
from re import compile
import sys

# Patterns matching a bare float token, and a float preceded by '[' or ','.
# Presumably used below (past this excerpt) to reduce coordinate precision
# while merging — confirm against the consuming code.
float_pat = compile(r'^-?\d+\.\d+(e-?\d+)?$')
charfloat_pat = compile(r'^[\[,\,]-?\d+\.\d+(e-?\d+)?$')

parser = ArgumentParser(description="Group (merge) multiple GeoJSON files.")

# Defaults applied via set_defaults so unspecified options fall back cleanly.
defaults = dict(precision=6, outfile=sys.stdout)

parser.set_defaults(**defaults)

parser.add_argument('files',
                    type=FileType('r'), help='Files to be merged', nargs="+")
parser.add_argument('-p', '--precision', dest='precision',
                    type=int, help='Digits of precision')
parser.add_argument('-o', '--outfile', dest='outfile',
                    # NOTE(review): unbuffered binary output ('wb', 0) —
                    # confirm binary mode is intended for textual GeoJSON.
                    type=FileType('wb', 0), help='Outfile')

if __name__ == '__main__':
    args = parser.parse_args()
    infiles = args.files
    outfile = args.outfile

    outjson = dict(type='FeatureCollection', features=[])

    for infile in infiles:
        injson = load(infile)
Ejemplo n.º 16
0
from argparse import FileType, ArgumentParser
import csv
import os

# In order to work with kive, scripts that have a inputs
# and b outputs must have a+b command line arguments, the first a
# arguments specifying paths of input files, the subsequent b
# arguments specifying the paths where outputs are written.

# ArgumentParser facilitates parsing inputs from sys.argv, and
# generates help messages based on the expected input specification
parser = ArgumentParser(
    description="Takes CSV with (x,y), outputs CSV with (x+y),(x*y)")
parser.add_argument("input_csv",
                    # BUGFIX: mode 'rU' was deprecated and removed in
                    # Python 3.11; plain text mode already provides
                    # universal newlines on Python 3.
                    type=FileType('r'),
                    help="CSV containing (x,y) pairs")
parser.add_argument("output_csv",
                    # BUGFIX: csv writers emit str, so the output file
                    # must be opened in text mode — 'wb' raises
                    # TypeError on Python 3.
                    type=FileType('w'),
                    help="CSV containing (x+y,xy) pairs")
args = parser.parse_args()

reader = csv.DictReader(args.input_csv)
writer = csv.DictWriter(args.output_csv, ['sum', 'product'],
                        lineterminator=os.linesep)
writer.writeheader()

# Each input row has integer fields 'x' and 'y'; emit their sum and product.
for row in reader:
    x = int(row['x'])
    y = int(row['y'])
    writer.writerow(dict(sum=x + y, product=x * y))
Ejemplo n.º 17
0
def remove_templates(args: Namespace) -> None:
    """Delete the named config template on the master and report success."""
    template_name = args.template_name
    api.delete(args.master, path="templates/" + template_name)
    print(colored("Removed template {}".format(template_name), "green"))


# fmt: off

# Declarative CLI tree for the "template" command group: list, describe,
# set and remove config templates (formatting pinned by the fmt markers).
args_description = [
    Cmd("template tpl", None, "manage config templates", [
        Cmd("list ls", list_template, "list config templates", [
            Arg("-d", "--details", action="store_true",
                help="show the configs of the templates"),
        ], is_default=True),
        Cmd("describe", describe_template,
            "describe config template", [
                Arg("template_name", type=str, help="template name"),
            ]),
        Cmd("set", set_template, "set config template", [
            Arg("template_name", help="template name"),
            Arg("template_file", type=FileType("r"),
                help="config template file (.yaml)")
        ]),
        Cmd("remove rm", remove_templates,
            "remove config template", [
                Arg("template_name", help="template name")
            ]),
    ])
]  # type: List[Any]

# fmt: on
Ejemplo n.º 18
0
 def add_arguments(self, parser: ArgumentParser):
     """Register this command's arguments on *parser*."""
     # Optional positional input file; defaults to stdin so the command
     # can consume piped data.
     parser.add_argument('input', nargs='?', type=FileType('r'),
                         default=stdin)
     parser.add_argument("--server-addr")
Ejemplo n.º 19
0
def main():
    """Entry point: parse CLI options, configure logging, scan the I2C bus."""
    debug = False
    try:
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('device',
                               nargs='?',
                               default='ftdi:///?',
                               help='serial port device name')
        argparser.add_argument('-P',
                               '--vidpid',
                               action='append',
                               help='specify a custom VID:PID device ID, '
                               'may be repeated')
        argparser.add_argument('-V',
                               '--virtual',
                               type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # Each -v lowers the log threshold by 10, clamped to [DEBUG, ERROR].
        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            # Verbose format with timestamps and logger names for debugging.
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            # Registers user-supplied VID:PID pairs with the Ftdi class —
            # presumably so the device URL can resolve them; confirm in
            # add_custom_devices.
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        I2cBusScanner.scan(args.device)

    # Expected failures become a one-line message and exit code 1;
    # Ctrl-C exits with code 2.
    except (ImportError, IOError, NotImplementedError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
Ejemplo n.º 20
0
        ],
        require_auth=False,
    ),
]

POST_COMMANDS = [
    Command(
        name="post",
        description="Post a status text to your timeline",
        arguments=[
            (["text"], {
                "help": "The status text to post.",
                "nargs": "?",
            }),
            (["-m", "--media"], {
                "type": FileType('rb'),
                "help": "path to the media file to attach"
            }),
            (["-v", "--visibility"], {
                "type":
                visibility,
                "default":
                "public",
                "help":
                'post visibility, one of: %s' % ", ".join(VISIBILITY_CHOICES),
            }),
            (["-s", "--sensitive"], {
                "action": 'store_true',
                "default": False,
                "help": "mark the media as NSFW",
            }),
Ejemplo n.º 21
0
def main():
    """Main routine: configure logging and run the MiniTerm serial console."""
    debug = False
    try:
        default_device = get_default_device()
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        if platform != 'win32':
            # Full terminal mode is only offered on non-Windows platforms.
            argparser.add_argument('-f', '--fullmode', dest='fullmode',
                                   action='store_true',
                                   help='use full terminal mode, exit with '
                                        '[Ctrl]+B')
        argparser.add_argument('device', nargs='?', default=default_device,
                               help='serial port device name (default: %s)' %
                               default_device)
        argparser.add_argument('-b', '--baudrate',
                               help='serial port baudrate (default: %d)' %
                               MiniTerm.DEFAULT_BAUDRATE,
                               default='%s' % MiniTerm.DEFAULT_BAUDRATE)
        argparser.add_argument('-w', '--hwflow',
                               action='store_true',
                               help='hardware flow control')
        argparser.add_argument('-e', '--localecho',
                               action='store_true',
                               help='local echo mode (print all typed chars)')
        argparser.add_argument('-r', '--crlf',
                               action='count', default=0,
                               help='prefix LF with CR char, use twice to '
                                    'replace all LF with CR chars')
        argparser.add_argument('-l', '--loopback',
                               action='store_true',
                               help='loopback mode (send back all received '
                                    'chars)')
        argparser.add_argument('-s', '--silent', action='store_true',
                               help='silent mode')
        argparser.add_argument('-P', '--vidpid', action='append',
                               help='specify a custom VID:PID device ID, '
                                    'may be repeated')
        argparser.add_argument('-V', '--virtual', type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v', '--verbose', action='count',
                               help='increase verbosity')
        argparser.add_argument('-d', '--debug', action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # BUGFIX: '-f' is only registered when platform != 'win32', so on
        # Windows the parsed namespace has no 'fullmode' attribute and the
        # previously unconditional args.fullmode reads below raised
        # AttributeError (which is not in the caught exception tuple).
        fullmode = getattr(args, 'fullmode', False)

        # -v is a repeatable counter (None when absent): each occurrence
        # lowers the log threshold by 10, clamped to [DEBUG, ERROR].
        loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
                                  '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            # Register user-supplied VID:PID pairs before opening the device.
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        init_term(fullmode)
        miniterm = MiniTerm(device=args.device,
                            baudrate=to_bps(args.baudrate),
                            parity='N',
                            rtscts=args.hwflow,
                            debug=args.debug)
        miniterm.run(fullmode, args.loopback, args.silent, args.localecho,
                     args.crlf)

    # Expected failures become a one-line message and exit code 1;
    # Ctrl-C exits with code 2.
    except (IOError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
Ejemplo n.º 22
0
    def create(description=DESCRIPTION):

        evars = envvars.to_doc(KalturaArgParser.ENV_VARS)
        for k in evars:
            description = description + "\n\t%-15s:  %s" % (k, evars[k])

        loglevels = ['ERROR', 'WARN', 'INFO', 'DEBUG']
        parser = KalturaArgParser(description=description, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("--loglevel", "-l", choices=loglevels,  default=logging.WARN, help="log level  - default: WARN")

        subparsers = parser.add_subparsers(help='sub-command help')

        subparsers.add_parser('config', description='test access to Kaltura KMC, AWS').set_defaults(func=check_config)

        subparser = subparsers.add_parser('repair', description="repair matching videos - look at tags and replace original flavor as tags indicate ")
        subparser.add_argument("--repair", action="store_true", default=False, help="performs in dryrun mode, unless repair param is given")
        subparser.add_argument("--tmp", default=".", help="directory for temporary files")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=repair)

        subparser = subparsers.add_parser('s3copy', description="copy original flavors of matching videos to AWS-s3; skip flavors bigger than {} kb".format(CheckAndLog.SIZE_LIMIT_KB))
        subparser.add_argument("--s3copy", action="store_true", default=False, help="performs in dryrun mode, unless save param is given")
        subparser.add_argument("--tmp", default=".", help="directory for temporary files")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=copy_to_s3)

        subparser = subparsers.add_parser('restore_from_s3', description="restore matching videos from AWS-s3")
        subparser.add_argument("--restore", action="store_true", default=False, help="performs in dryrun mode, unless restore param is given")
        subparser.add_argument("--wait_ready", '-w', action="store_true", default=True, help="wait for original flavor status to be ready before restoring next video")
        subparser.add_argument("--tmp", default=".", help="directory for temporary files")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=restore_from_s3)

        subparser = subparsers.add_parser('replace_video', description="delete flavors and replace original with place holder video of matching entries  \
        IF entries have healthy archived copy in AWS-s3")
        subparser.add_argument("--replace", action="store_true", default=False, help="performs in dryrun mode, unless replace param is given")
        subparser.add_argument("--wait_ready", '-w', action="store_true", default=True, help="wait for original flavor status to be ready before replacing next video")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=replace_videos)

        subparser = subparsers.add_parser('download', description="download original for given video ")
        subparser.add_argument("--id", "-i",  required=True, help="kaltura media entry id")
        subparser.add_argument("--tmp", default=".", help="directory for temporary files")
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=download)

        subparser = subparsers.add_parser('count', description="count matching videos ")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser, page_size=0))
        subparser.set_defaults(func=count)

        subparser = subparsers.add_parser('list', description="list matching videos ")
        subparser.add_argument("--mode", "-m", choices=["video", "flavor"], default="video", help="list video or flavor information")
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=list)

        description = """
check status of entries, that is check each matching entry for the following: 
  +  has original flavor in READY status,
  +  the {} tag is set iff and only iff there is a corresponding entry in S3 
  +  if it does not have an {} tag the S# entry's size should match the size of the original flavor  
""".format(SAVED_TO_S3, PLACE_HOLDER_VIDEO)
        subparser = subparsers.add_parser('health', description=description)
        KalturaArgParser._add_filter_params(KalturaArgParser._add_pager_params(subparser))
        subparser.add_argument('--idfile', '-I',  type=FileType('r'), required=False, help="file with kaltura ids, one per line")
        subparser.set_defaults(func=health_check)

        return parser
Ejemplo n.º 23
0
    def _add_main_options(self) -> None:
        """Register SMAC's main command-line options on ``self.parser``."""
        # When invoked as "python smac.py ...", argv[0] is the interpreter;
        # show the script path as the program name instead.
        prog = sys.argv[0]
        if re.match("^python[0-9._-]*$", sys.argv[0]):
            prog = sys.argv[1]
        self.parser = SMACArgumentParser(
            formatter_class=ConfigurableHelpFormatter,
            add_help=False,
            prog=prog)
        # let a help message begin with "[dev]" to add a developer option
        req_opts = self.parser.add_argument_group("Required Options")
        req_opts.add_argument("--scenario",
                              "--scenario-file",
                              "--scenario_file",
                              dest="scenario_file",
                              required=True,
                              type=str,
                              action=CheckScenarioFileAction,
                              help="Scenario file in AClib format.")
        opt_opts = self.parser.add_argument_group("Optional Options")
        # add_help=False above, so help actions are registered manually here.
        opt_opts.add_argument("--help",
                              action=StandardHelpAction,
                              help="Show help messages for standard options.")
        opt_opts.add_argument(
            "--help-all",
            action=DevHelpAction,
            help="Show help messages for both standard and developer options.")
        opt_opts.add_argument("--seed",
                              default=1,
                              type=int,
                              help="Random Seed.")
        opt_opts.add_argument("--verbose",
                              "--verbose-level",
                              "--verbose_level",
                              dest="verbose_level",
                              default=logging.INFO,
                              choices=["INFO", "DEBUG"],
                              help="Verbosity level.")
        opt_opts.add_argument("--mode",
                              default="SMAC4AC",
                              choices=[
                                  "SMAC4AC", "ROAR", "Hydra", "PSMAC",
                                  "SMAC4HPO", "SMAC4BO"
                              ],
                              help="Configuration mode.")
        opt_opts.add_argument("--restore-state",
                              "--restore_state",
                              dest="restore_state",
                              default=None,
                              help="Path to directory with SMAC-files.")
        # list of runhistory dump files
        # scenario corresponding to --warmstart_runhistory;
        # pcs and feature space has to be identical to --scenario_file
        opt_opts.add_argument("--warmstart-runhistory",
                              "--warmstart_runhistory",
                              dest="warmstart_runhistory",
                              default=None,
                              nargs="*",
                              help=SUPPRESS)
        opt_opts.add_argument("--warmstart-scenario",
                              "--warmstart_scenario",
                              dest="warmstart_scenario",
                              default=None,
                              nargs="*",
                              help=SUPPRESS)
        # list of trajectory dump files, reads runhistory and uses final incumbent as challenger
        opt_opts.add_argument("--warmstart-incumbent",
                              "--warmstart_incumbent",
                              dest="warmstart_incumbent",
                              default=None,
                              nargs="*",
                              help=SUPPRESS)
        # NOTE(review): the following [dev] options are added to the
        # "Required Options" group even though they all have defaults —
        # possibly intentional to surface them in help; confirm.
        req_opts.add_argument(
            "--random_configuration_chooser",
            default=None,
            type=FileType('r'),
            help=
            "[dev] path to a python module containing a class `RandomConfigurationChooserImpl`"
            "implementing the interface of `RandomConfigurationChooser`")
        req_opts.add_argument(
            "--hydra_iterations",
            default=3,
            type=int,
            help=
            "[dev] number of hydra iterations. Only active if mode is set to Hydra"
        )
        req_opts.add_argument(
            "--hydra_validation",
            default='train',
            choices=[
                'train', 'val10', 'val20', 'val30', 'val40', 'val50', 'none'
            ],
            type=str.lower,
            help="[dev] set to validate incumbents on. valX =>"
            " validation set of size training_set * 0.X")
        req_opts.add_argument(
            "--incumbents_per_round",
            default=1,
            type=int,
            help=
            "[dev] number of configurations to keep per psmac/hydra iteration.",
            dest="hydra_incumbents_per_round")
        req_opts.add_argument(
            "--n_optimizers",
            default=1,
            type=int,
            help=
            "[dev] number of optimizers to run in parallel per psmac/hydra iteration.",
            dest="hydra_n_optimizers")
        req_opts.add_argument("--psmac_validate",
                              default=False,
                              type=truthy,
                              help="[dev] Validate all psmac configurations.")

        # Cache action metadata/translations for later help rendering.
        self.main_cmd_actions, self.main_cmd_translations = CMDReader._extract_action_info(
            self.parser._actions)
Ejemplo n.º 24
0
def get_args():
    """Parse command line arguments.

    Returns the parsed namespace: ``file`` (readable file object, defaults
    to stdin) and ``rts`` (string identifying the RTS inside the file,
    defaults to "1").
    """
    arg_parser = ArgumentParser(
        description="Convert from XML or JSON to TXT format.")
    arg_parser.add_argument("file", nargs='?', type=FileType('r'),
                            default=sys.stdin, help="File with RTS.")
    arg_parser.add_argument("--rts", type=str, default="1",
                            help="RTS number inside file.")
    return arg_parser.parse_args()
Ejemplo n.º 25
0
def print_json(sents):
    """Emit *sents* as a JSON array, one sentence object per element."""
    print('[')
    for index, sentence in enumerate(sents):
        # Comma-separate elements: every sentence after the first is
        # preceded by a comma on its own line.
        if index:
            print(',')
        print_sent_json(sentence)
    print(']')


if __name__ == '__main__':
    # CLI entry: parse options, load sentences, and dump them as JSON.
    parser = ArgumentParser(description=desc)
    parser.add_argument("inF", type=FileType(encoding="utf-8"))
    # Each --no-* flag *disables* the corresponding processing step
    # (store_false leaves the destination True by default).
    parser.add_argument("--no-morph-syn", action="store_false",
                        dest="morph_syn")
    parser.add_argument("--no-misc", action="store_false", dest="misc")
    parser.add_argument("--no-validate-pos", action="store_false",
                        dest="validate_pos")
    parser.add_argument("--no-validate-type", action="store_false",
                        dest="validate_type")
    parser.add_argument("--store-conllulex", choices=(False, 'full', 'toks'))
    print_json(load_sents(**vars(parser.parse_args())))
Ejemplo n.º 26
0
    Always stream the output by line, i.e., behave like `tail -f'.

    Without --stream and with --pretty (either set or implied),
    HTTPie fetches the whole response before it outputs the processed data.

    Set this option when you want to continuously display a prettified
    long-lived response, such as one from the Twitter streaming API.

    It is useful also without --pretty: It ensures that the output is flushed
    more often and in smaller chunks.

    ''')
)
# Destination for response output; 'a+b' opens FILE in binary append
# mode, so repeated invocations append rather than truncate.
output_processing.add_argument(
    '--output', '-o',
    type=FileType('a+b'),
    dest='output_file',
    metavar='FILE',
    help=_(
        '''
        Save output to FILE. If --download is set, then only the response
        body is saved to the file. Other parts of the HTTP exchange are
        printed to stderr.

        '''
    )
)

output_options.add_argument(
    '--download', '-d',
    action='store_true',
Ejemplo n.º 27
0
                    help='Path to the gallery dataset csv file.')

# HDF5 file with precomputed gallery embeddings — presumably produced by
# an earlier embedding step; confirm against the embedding script.
parser.add_argument(
    '--gallery_embeddings',
    required=True,
    help='Path to the h5 file containing the gallery embeddings.')

# Distance metric; the valid choices come from the loss module's cdist.
parser.add_argument(
    '--metric',
    required=True,
    choices=loss.cdist.supported_metrics,
    help='Which metric to use for the distance between embeddings.')

# Optional JSON results file; argparse opens it for writing immediately.
parser.add_argument(
    '--filename',
    type=FileType('w'),
    help='Optional name of the json file to store the results in.')

parser.add_argument(
    '--batch_size',
    default=256,
    type=common.positive_int,
    help='Batch size used during evaluation, adapt based on your memory usage.'
)

parser.add_argument(
    '--use_market_ap',
    action='store_true',
    default=False,
    help='When this flag is provided, the average precision is computed exactly'
    ' as done by the Market-1501 evaluation script, rather than the '
Ejemplo n.º 28
0

def partOne(inp):
    """Solve part one of the puzzle for raw input ``inp``.

    Not implemented yet; currently returns None.
    """
    pass


def partTwo(inp):
    """Solve part two of the puzzle for raw input ``inp``.

    Not implemented yet; currently returns None.
    """
    pass


if __name__ == '__main__':
    from argparse import ArgumentParser, FileType

    # CLI: run unit tests and/or solve both parts for the given input file.
    parser = ArgumentParser()
    parser.add_argument("-t", "--test", help='Unit tests', action='store_true')
    parser.add_argument("-i", "--input", help='Your input file',
                        type=FileType('r'))
    opts = parser.parse_args()

    if opts.test:
        testOne()
        print()
        testTwo()
        print()
    if opts.input:
        puzzle_input = opts.input.read().strip()
        print("Answer for part one is : {res}".format(res=partOne(puzzle_input)))
        print("Answer for part two is : {res}".format(res=partTwo(puzzle_input)))
Ejemplo n.º 29
0
                else:
                    neighbors[new_node] = new_distance
        explored[smallest_node] = distance
        neighbors.pop(smallest_node)


def part_one(inp):
    """Length of the shortest path through the flat (non-recursive) maze."""
    graph, start, end = _to_donut(parse_portals(DictMap(inp)))
    # shortest_path returns the node sequence; edges = nodes - 1.
    return len(shortest_path(graph, start, end)) - 1


def part_two(inp):
    """Length of the shortest path through the recursive (leveled) maze."""
    portal_graph = _precompute_single_level_paths(parse_portals(DictMap(inp)))
    # The dijkstra helper counts nodes; subtract one for the edge count.
    return recursive_maze_dijkstra(portal_graph) - 1


if __name__ == '__main__':
    from argparse import ArgumentParser, FileType

    # CLI: read the puzzle input file and print both answers.
    cli = ArgumentParser()
    cli.add_argument("input", help='Your input file', type=FileType('r'))
    parsed = cli.parse_args()

    raw = parsed.input.read()
    print("Answer for part one is : {res}".format(res=part_one(raw)))
    print("Answer for part two is : {res}".format(res=part_two(raw)))
#!/usr/bin/env python
'''
simple_bootstrap_init -- Init. for bias_bootstrap.py

simple_bootstrap_init is an initialization code for simple_bootstrap.py that 
sets up the argument parser and data to process for bias_bootstrap.py.

@author:     jpwalker
@contact:    [email protected]
@deffield    updated: 2-5-16
'''

from argparse import ArgumentParser, FileType

# Shared argparse type: every file argument below is opened for reading.
rf = FileType('r') #Read in file type
# Help texts for the four std_args entries below, in the same order.
hlp = ('Path to file for halo_tbl_1. Bootstraps will be created from \
halo_tbl_1.', 
'Path to file for halo_tbl_2. The created bootstraps from halo_tbl_1 will be \
cross-correlated with halo_tbl_2.', 
'Path to file containing the 2pt-autocorrelation function for matter in the \
universe.', 
'The 2pt- autocorrelation function for halo_tbl_2. If not given it has to be \
calculated.')
# (name, add_argument-kwargs) pairs consumed when building the parser;
# the first three are positional, '--xi_ht2' is optional.
std_args = (('halo_tbl_1',{'action':'store', 'nargs':1, 'type':rf,
                           'help':hlp[0]}),
            ('halo_tbl_2', {'action':'store', 'nargs':1, 'type':rf,
                            'help':hlp[1]}),
            ('xi_m_m', {'action':'store', 'nargs':1, 'type':rf, 
                        'help':hlp[2]}),
            ('--xi_ht2', {'action':'store', 'nargs':1, 'type':rf,
                          'help':hlp[3], 'metavar':'xi_halo_tbl_2'}))