Example #1
def main(argv=None):
    """The main entry-point to salvo."""
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(description='Provision a new salvo.')
    parser.add_argument('config',
                        type=argparse.FileType('r'),
                        help='salvo configuration file to run')
    parser.add_argument('--playbook',
                        '-p',
                        type=argparse.FileType('r'),
                        default='./deploy/playbook.yml',
                        help='directory where playbooks reside')
    parser.add_argument('--wait',
                        '-w',
                        default=False,
                        action='store_true',
                        help='wait for [Enter] before cleaning up')
    parser.add_argument('--deployment',
                        '-d',
                        type=str,
                        default='salvo',
                        help='deployment name for this salvo')
    parser.add_argument('--set',
                        '-s',
                        nargs='*',
                        type=str,
                        help='key:value pair to set for this salvo execution')
    parser.add_argument('--dry-run',
                        '-n',
                        action='store_true',
                        default=False,
                        help='only print what actions would be taken')
    args = parser.parse_args(argv)

    args.set = dict(item.split(":", maxsplit=1)
                    for item in args.set) if args.set is not None else {}
    topology = Topology.load_file(args.config, args.set)

    hq = Cluster('hq', {
        'expose': [22],
    }, {})
    topology.clusters = [hq] + topology.clusters

    agenda.section("Set up network")

    client = boto3.client('ec2')
    ec2 = boto3.resource('ec2')

    # Set up VPC
    agenda.task("Create VPC")
    vpc = client.create_vpc(DryRun=args.dry_run, CidrBlock='10.0.0.0/16')
    vpc = ec2.Vpc(vpc['Vpc']['VpcId'])

    agenda.task("Attach VPC internet gateway")
    gateway = client.create_internet_gateway(DryRun=args.dry_run)
    gateway = ec2.InternetGateway(
        gateway['InternetGateway']['InternetGatewayId'])
    gateway.attach_to_vpc(DryRun=args.dry_run, VpcId=vpc.id)

    agenda.task("Create internet-enabled route table")
    iroutable = vpc.create_route_table(DryRun=args.dry_run)
    iroutable.create_route(DryRun=args.dry_run,
                           DestinationCidrBlock='0.0.0.0/0',
                           GatewayId=gateway.id)

    subnets = []
    secs = []
    for i, c in enumerate(topology.clusters):
        agenda.task("Allocate subnet #{}".format(i + 1))
        subnet = vpc.create_subnet(DryRun=args.dry_run,
                                   CidrBlock='10.0.{}.0/24'.format(i))

        if c.internet:
            agenda.subtask("Hook in internet-enable route table")
            iroutable.associate_with_subnet(DryRun=args.dry_run,
                                            SubnetId=subnet.id)

        # set up security groups
        agenda.subtask("Create network security group")
        sec = vpc.create_security_group(
            DryRun=args.dry_run,
            GroupName='{}-cluster-{}'.format(args.deployment, i + 1),
            Description='Ingress rules for cluster {}-{}'.format(
                args.deployment, c.name))
        # allow all internal traffic
        sec.authorize_ingress(DryRun=args.dry_run,
                              IpProtocol='tcp',
                              FromPort=1,
                              ToPort=65535,
                              CidrIp='10.0.0.0/16')

        if c.expose is not False:
            for p in c.expose:
                agenda.subtask("Allow ingress traffic on port {}".format(p))
                sec.authorize_ingress(DryRun=args.dry_run,
                                      IpProtocol='tcp',
                                      FromPort=p,
                                      ToPort=p,
                                      CidrIp='0.0.0.0/0')

        secs.append(sec)
        subnets.append(subnet)

    # Tag all our VPC resources
    agenda.task("Tag all VPC resources")
    ec2.create_tags(DryRun=args.dry_run,
                    Resources=[
                        vpc.id,
                        gateway.id,
                        iroutable.id,
                    ] + [sn.id for sn in subnets] + [sg.id for sg in secs],
                    Tags=[{
                        'Key': 'salvo',
                        'Value': args.deployment,
                    }])

    # Create access keys
    agenda.task("Generate VPC key pair")
    try:
        keys = client.create_key_pair(DryRun=args.dry_run,
                                      KeyName=args.deployment)
    except botocore.exceptions.ClientError:
        # Key probably already exists. Delete and re-create.
        agenda.subfailure("Could not create key pair")
        agenda.subtask("Attempting to delete old key pair")
        client.delete_key_pair(DryRun=args.dry_run, KeyName=args.deployment)
        agenda.subtask("Attempting to generate new key pair")
        keys = client.create_key_pair(DryRun=args.dry_run,
                                      KeyName=args.deployment)

    keymat = keys['KeyMaterial']
    keys = ec2.KeyPair(keys['KeyName'])

    agenda.section("Launch instances")

    # Launch instances
    clusters = []
    for i, c in enumerate(topology.clusters):
        nics = [{
            "DeviceIndex": 0,
            "Groups": [secs[i].id],
            "SubnetId": subnets[i].id,
            "DeleteOnTermination": True,
            "AssociatePublicIpAddress": c.internet,
        }]

        agenda.task("Launching {} instances in cluster {}".format(
            c.attrs['count'], c.name))
        clusters.append(
            list(
                map(lambda x: ec2.Instance(x), [
                    instance['InstanceId']
                    for instance in client.run_instances(
                        DryRun=args.dry_run,
                        KeyName=keys.name,
                        NetworkInterfaces=nics,
                        ImageId=c.attrs['image'],
                        MinCount=c.attrs['count'],
                        MaxCount=c.attrs['count'],
                        InstanceType=c.attrs['itype'],
                        InstanceInitiatedShutdownBehavior='terminate')
                    ['Instances']
                ])))

    exit = 1
    try:
        agenda.task("Wait for HQ to start running")

        hq = clusters[0][0]
        while hq.state['Name'] == 'pending':
            agenda.subtask("Still in 'pending' state")
            sleep(3)
            hq.load()

        if hq.state['Name'] != 'running':
            agenda.failure(hq.state_reason['Message'])
            raise ChildProcessError(hq.state_reason['Message'])

        def prepare(ci, instance):
            # hq comes from the enclosing main() scope
            print("instance {} in {} now available through {}".format(
                instance.private_ip_address, topology.clusters[ci].name,
                hq.public_ip_address))

        agenda.task("Wait for workers to reach 'running' state")

        done = []
        p = Pool(5)
        pending = True
        while pending:
            pending = False
            for i, cluster in enumerate(clusters):
                for ii, instance in enumerate(cluster):
                    if instance.state['Name'] == 'pending':
                        agenda.subtask(
                            "Instance {}.{} is still pending".format(
                                i + 1, ii + 1))

                        pending = True
                        instance.load()
                        break
                    elif instance.state['Name'] != 'running':
                        agenda.subfailure("Instance {}.{} failed: {}".format(
                            i + 1, ii + 1, instance.state_reason['Message']))
                        raise ChildProcessError(
                            instance.state_reason['Message'])
                    else:
                        # State is now 'running'
                        tag = (i, ii)
                        if tag not in done:
                            # State hasn't been 'running' before
                            done.append(tag)
                            p.apply_async(prepare, [i, instance])
                if pending:
                    break
            sleep(3)
        p.close()
        p.join()

        agenda.task("Wait for HQ to become pingable")

        # Wait for hq to be pingable
        deployment = Deployer(args.playbook.name, topology, keymat, clusters)
        while not deployment.test(hq.public_ip_address):
            sleep(1)

        agenda.task("Wait for workers to become pingable")

        # Wait for workers to be pingable
        for i, cluster in enumerate(clusters):
            for ii, instance in enumerate(cluster):
                while not deployment.test(instance.private_ip_address):
                    sleep(1)

        # Deploy!
        agenda.section("Deploy application")
        exit = deployment.deploy()
    except:
        import traceback
        traceback.print_exc()
    finally:
        agenda.section("Clean up VPC")

        if args.wait:
            agenda.prompt("Press [Enter] when you are ready to clean")
            input()

        # Terminate instances and delete VPC resources
        agenda.task("Terminate all instances")
        instances = list(vpc.instances.all())
        vpc.instances.terminate(DryRun=args.dry_run)
        still_running = True
        while still_running:
            still_running = False
            for i in instances:
                i.load()
                if i.state['Name'] != 'terminated':
                    agenda.subtask("At least one instance still shutting down")
                    still_running = True
                    sleep(3)
                    break

        agenda.task("Delete network resources")
        agenda.subtask("key pair")
        keys.delete(DryRun=args.dry_run)
        agenda.subtask("internet-enabled route associations")
        for r in iroutable.associations.all():
            r.delete(DryRun=args.dry_run)
        agenda.subtask("internet-enabled route table")
        iroutable.delete(DryRun=args.dry_run)
        agenda.subtask("internet gateway")
        gateway.detach_from_vpc(DryRun=args.dry_run, VpcId=vpc.id)
        gateway.delete(DryRun=args.dry_run)
        agenda.subtask("subnets")
        try:
            for sn in subnets:
                sn.delete(DryRun=args.dry_run)
        except:
            agenda.subfailure("failed to delete subnet:")
            import traceback
            traceback.print_exc()
        agenda.subtask("security groups")
        for sg in secs:
            sg.delete()
        agenda.subtask("network interfaces")
        for i in vpc.network_interfaces.all():
            i.delete(DryRun=args.dry_run)

        agenda.task("Delete the VPC")
        vpc.delete(DryRun=args.dry_run)

    return exit
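A quick aside on the --set handling above: nargs='*' hands back a list of raw
"key:value" strings, and the dict/split expression turns them into an override
mapping. A minimal, self-contained sketch (the values are made up):

import argparse

p = argparse.ArgumentParser()
p.add_argument('--set', '-s', nargs='*', type=str)
args = p.parse_args(['--set', 'region:us-east-1', 'count:3'])
# maxsplit=1 keeps any further colons inside the value part
overrides = dict(item.split(':', maxsplit=1) for item in args.set) if args.set else {}
print(overrides)  # {'region': 'us-east-1', 'count': '3'}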
Example #2
import argparse, sys

parser = argparse.ArgumentParser()

g0 = parser.add_argument_group('common', 'Common args')
g0.add_argument("problemfile",
                type=argparse.FileType("r"),
                help="Problem description YAML file")
g0.add_argument("planner",
                choices=["trajopt", "ompl", "chomp", "chomp2"],
                help="Planner to run")
g0.add_argument(
    "-o",
    "--outfile",
    type=argparse.FileType("w"),
    help="File to dump results (generated trajectories, timing info, etc.)")
g0.add_argument("--record_failed_problems",
                type=argparse.FileType("w"),
                help="File to save failed start/goal pairs")
g0.add_argument(
    "--problems",
    type=argparse.FileType("r"),
    help=
    "ignore the problems in problemfile and use these only (good for output from --record_failed_problems)"
)
g0.add_argument("--animate_all",
                action="store_true",
                help="animate solutions to every problem after solving")

# chomp+ompl options
g1 = parser.add_argument_group('chomp+ompl', 'Options for CHOMP and OMPL')
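The group calls above only affect how --help is organised; parsed values still
land on a single namespace. A small sketch (option names are illustrative):

import argparse

parser = argparse.ArgumentParser()
common = parser.add_argument_group('common', 'Common args')
common.add_argument('--planner', choices=['trajopt', 'ompl'], default='ompl')
extra = parser.add_argument_group('chomp+ompl', 'Options for CHOMP and OMPL')
extra.add_argument('--timeout', type=float, default=30.0)
args = parser.parse_args(['--planner', 'trajopt'])
print(args.planner, args.timeout)  # trajopt 30.0 -- one flat namespace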
Example #3
                if not self.link:
                    if os.path.isdir(src):
                        dir_util.copy_tree(src, dest)
                    else:
                        file_util.copy_file(src, dest)
                else:
                    self._create_link(src, dest)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--link', action='store_true')
    parser.add_argument('-m',
                        '--manifest',
                        default='/tmp/dep-manifest.yml',
                        type=argparse.FileType())
    parser.add_argument('-vp', '--version-manifest', default=None)
    parser.add_argument("--no-key", default=False, action="store_true")
    options = parser.parse_args()
    key_path = "/root/.ssh/id_rsa"
    ssh_dir = os.path.dirname(key_path)
    privatekey = os.environ.get("PRIVATEKEY")
    if privatekey:
        if not os.path.exists(ssh_dir):
            os.makedirs(ssh_dir, 0o700)
        with open(key_path, "w") as key_data:
            key_data.write(privatekey)
        os.chmod(key_path, 0o600)
    elif subprocess.call(['ssh-add', '-l']) != 0 and not options.no_key:
        raise RuntimeError("Need either PRIVATEKEY env or key in agent")
    dep_data = yaml.safe_load(options.manifest)
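One argparse detail worth calling out in Example #3: --manifest combines a plain
string default with type=argparse.FileType(), and argparse also runs string
defaults through the type callable, so /tmp/dep-manifest.yml must be openable
whenever -m is omitted. A hedged sketch of that behaviour:

import argparse
import tempfile

p = argparse.ArgumentParser()
p.add_argument('-m', '--manifest',
               default='/tmp/dep-manifest.yml',
               type=argparse.FileType())
# p.parse_args([]) would try to open /tmp/dep-manifest.yml and error out if it
# is missing, because the string default is passed through FileType as well.
with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as fh:
    fh.write('deps: []\n')
args = p.parse_args(['-m', fh.name])
print(args.manifest.name)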
Example #4
def main():
    p = argparse.ArgumentParser()
    p.add_argument('catlas_prefix', help='catlas prefix')
    p.add_argument('query')
    p.add_argument('-k',
                   '--ksize',
                   default=31,
                   type=int,
                   help='k-mer size (default: 31)')
    p.add_argument('-o', '--output', type=argparse.FileType('wt'))
    p.add_argument('-v', '--verbose', action='store_true')
    args = p.parse_args()

    contigs = os.path.join(args.catlas_prefix, 'contigs.fa.gz')

    assert args.output, 'must specify -o'
    outfp = args.output
    outname = args.output.name

    # load k-mer MPHF index
    kmer_idx = search_utils.load_kmer_index(args.catlas_prefix)

    # build hashes for all the query k-mers
    print('loading query kmers...')
    bf = khmer.Nodetable(args.ksize, 1, 1)

    x = set()
    n = 0

    query_kmers = set()
    for record in screed.open(args.query):
        query_kmers.update(bf.get_kmer_hashes(record.sequence))

    cdbg_match_counts = kmer_idx.get_match_counts(query_kmers)

    f_found = sum(cdbg_match_counts.values()) / len(query_kmers)
    print('...done loading & counting query k-mers in cDBG.')
    print('containment: {:.1f}%'.format(f_found * 100))

    print('loading catlas...', end=' ')
    catlas = os.path.join(args.catlas_prefix, 'catlas.csv')
    domfile = os.path.join(args.catlas_prefix, 'first_doms.txt')
    top_node_id, dag, dag_up, dag_levels, catlas_to_cdbg = search_utils.load_dag(
        catlas)
    layer1_to_cdbg = search_utils.load_layer1_to_cdbg(catlas_to_cdbg, domfile)
    print('done.')

    cdbg_shadow = set()
    catlas_nodes = set()
    for layer1_node, cdbg_list in layer1_to_cdbg.items():
        for cdbg_node in cdbg_list:
            if cdbg_match_counts.get(cdbg_node, 0):
                # keep catlas node & all associated cdbg nodes
                catlas_nodes.add(layer1_node)
                cdbg_shadow.update(cdbg_list)
                break

    total_bp = 0
    total_seqs = 0

    # do some nodelist post-processing & output a response curve
    response_curve_filename = os.path.basename(outname) + '.response.txt'
    search_utils.output_response_curve(response_curve_filename,
                                       cdbg_match_counts, kmer_idx,
                                       layer1_to_cdbg)

    print('extracting contigs to {}.'.format(outname))
    for n, record in enumerate(screed.open(contigs)):
        if n % 10000 == 0:
            offset_f = total_seqs / len(cdbg_shadow)
            print('...at n {} ({:.1f}% of shadow)'.format(
                total_seqs, offset_f * 100),
                  end='\r')

        contig_id = int(record.name)
        if contig_id not in cdbg_shadow:
            continue

        outfp.write('>{}\n{}\n'.format(record.name, record.sequence))

        total_bp += len(record.sequence)
        total_seqs += 1

    print('')
    print('fetched {} contigs, {} bp matching node list.'.format(
        total_seqs, total_bp))

    sys.exit(0)
Example #5
def main():
    """Entry point for RWS.

    return (int): exit code (0 on success, 1 on error)

    """
    parser = argparse.ArgumentParser(description="Ranking for CMS.")
    parser.add_argument("--config",
                        type=argparse.FileType("rt"),
                        help="override config file")
    parser.add_argument("-d",
                        "--drop",
                        action="store_true",
                        help="drop the data already stored")
    args = parser.parse_args()

    config.load(args.config)

    if args.drop:
        print("Are you sure you want to delete directory %s? [y/N]" %
              config.lib_dir,
              end='')
        ans = raw_input().lower()
        if ans in ['y', 'yes']:
            print("Removing directory %s." % config.lib_dir)
            shutil.rmtree(config.lib_dir)
        else:
            print("Not removing directory %s." % config.lib_dir)
        return 1

    Contest.store.load_from_disk()
    Task.store.load_from_disk()
    Team.store.load_from_disk()
    User.store.load_from_disk()
    Submission.store.load_from_disk()
    Subchange.store.load_from_disk()

    Scoring.store.init_store()

    toplevel_handler = RoutingHandler(
        DataWatcher(),
        ImageHandler(os.path.join(config.lib_dir, '%(name)s'),
                     os.path.join(config.web_dir, 'img', 'logo.png')))

    wsgi_app = SharedDataMiddleware(
        DispatcherMiddleware(
            toplevel_handler, {
                '/contests':
                StoreHandler(Contest.store),
                '/tasks':
                StoreHandler(Task.store),
                '/teams':
                StoreHandler(Team.store),
                '/users':
                StoreHandler(User.store),
                '/submissions':
                StoreHandler(Submission.store),
                '/subchanges':
                StoreHandler(Subchange.store),
                '/faces':
                ImageHandler(os.path.join(config.lib_dir, 'faces', '%(name)s'),
                             os.path.join(config.web_dir, 'img', 'face.png')),
                '/flags':
                ImageHandler(os.path.join(config.lib_dir, 'flags', '%(name)s'),
                             os.path.join(config.web_dir, 'img', 'flag.png')),
            }), {'/': config.web_dir})

    servers = list()
    if config.http_port is not None:
        http_server = WSGIServer((config.bind_address, config.http_port),
                                 wsgi_app)
        servers.append(http_server)
    if config.https_port is not None:
        https_server = WSGIServer((config.bind_address, config.https_port),
                                  wsgi_app,
                                  certfile=config.https_certfile,
                                  keyfile=config.https_keyfile)
        servers.append(https_server)

    try:
        gevent.joinall(list(gevent.spawn(s.serve_forever) for s in servers))
    except KeyboardInterrupt:
        pass
    finally:
        gevent.joinall(list(gevent.spawn(s.stop) for s in servers))
    return 0
Example #6
def main():
    parser = argparse.ArgumentParser(
        description="Combine XCCDF or datastream and a tailoring file to form "
                    "just one file that contains all the profiles."
    )
    parser.add_argument(
        "SCAP_INPUT", type=argparse.FileType("r"),
        help="XCCDF or Source DataStream"
    )
    parser.add_argument(
        "TAILORING_FILE", type=argparse.FileType("r"),
        help="XCCDF 1.2 Tailoring file to insert"
    )
    parser.add_argument(
        "--output", type=argparse.FileType("w"), required=False,
        default=sys.stdout,
        help="Resulting XCCDF or Source DataStream"
    )

    args = parser.parse_args()

    input_tree = ElementTree.ElementTree()
    input_tree.parse(args.SCAP_INPUT)
    input_root = input_tree.getroot()

    tailoring_tree = ElementTree.ElementTree()
    tailoring_tree.parse(args.TAILORING_FILE)
    tailoring_root = tailoring_tree.getroot()

    benchmarks = list(input_root.findall(".//{%s}Benchmark" % (XCCDF12_NS)))

    if len(benchmarks) == 0:
        sys.stderr.write(
            "There is no Benchmark elements in input file %s \n" % (args.SCAP_INPUT.name))
        sys.exit(1)

    t_profiles = tailoring_root.findall(".//{%s}Profile" % (XCCDF12_NS))

    if len(t_profiles) == 0:
        sys.stderr.write(
            "There is no Profile elements in the tailored file %s \n" % (args.TAILORING_FILE.name))
        sys.exit(1)

    # As far as my tests go, a tailoring file cannot contain profiles that
    # belong to different benchmark checklists, so we just need to figure out
    # which benchmark the tailored profiles belong to.

    b_index = -1
    extended_profile = t_profiles[0].get("extends")
    benchmark = None
    profile_insert_point = None

    for i, bench in enumerate(benchmarks):
        if b_index != -1:
            break
        for profile in bench.findall("./{%s}Profile" % (XCCDF12_NS)):
            if b_index == -1 and profile.get("id") == extended_profile:
                b_index = i
                benchmark = benchmarks[i]
            profile_insert_point = profile

    for profile_to_add in t_profiles:
        index = list(benchmark).index(profile_insert_point)
        benchmark.insert(index + 1, profile_to_add)
        profile_insert_point = profile_to_add

    input_tree.write(args.output)
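The Benchmark/Profile lookups above use ElementTree's "{namespace}tag" syntax.
A standalone sketch, assuming XCCDF12_NS is the XCCDF 1.2 namespace URI and
using a one-element document made up for illustration:

import xml.etree.ElementTree as ElementTree

XCCDF12_NS = "http://checklists.nist.gov/xccdf/1.2"
doc = ('<Benchmark xmlns="{ns}">'
       '<Profile id="xccdf_org.example_profile_base"/>'
       '</Benchmark>').format(ns=XCCDF12_NS)
root = ElementTree.fromstring(doc)
for profile in root.findall(".//{%s}Profile" % XCCDF12_NS):
    print(profile.get("id"))  # xccdf_org.example_profile_base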
Example #7
from arboreto.utils import load_tf_names
from arboreto.algo import genie3, grnboost2, _prepare_input
from arboreto.core import SGBM_KWARGS, RF_KWARGS, EARLY_STOP_WINDOW_LENGTH
from arboreto.core import to_tf_matrix, target_gene_indices, infer_partial_network

from pyscenic.cli.utils import load_exp_matrix

################################################################################
################################################################################

parser_grn = argparse.ArgumentParser(
    description='Run Arboreto using a multiprocessing pool')

parser_grn.add_argument(
    'expression_mtx_fname',
    type=argparse.FileType('r'),
    help=
    'The name of the file that contains the expression matrix for the single cell experiment.'
    ' Two file formats are supported: csv (rows=cells x columns=genes) or loom (rows=genes x columns=cells).'
)
parser_grn.add_argument(
    'tfs_fname',
    type=argparse.FileType('r'),
    help=
    'The name of the file that contains the list of transcription factors (TXT; one TF per line).'
)
parser_grn.add_argument(
    '-m',
    '--method',
    choices=['genie3', 'grnboost2'],
    default='grnboost2',
Example #8
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function

import sys
import json
import jsonpatch
import argparse


parser = argparse.ArgumentParser(description='Diff two JSON files')
parser.add_argument('FILE1', type=argparse.FileType('r'))
parser.add_argument('FILE2', type=argparse.FileType('r'))
parser.add_argument('--indent', type=int, default=None,
                    help='Indent output by n spaces')
parser.add_argument('-v', '--version', action='version',
                    version='%(prog)s ' + jsonpatch.__version__)


def main():
  try:
    diff_files()
  except KeyboardInterrupt:
    sys.exit(1)


def diff_files():
  """ Diffs two JSON files and prints a patch """
  args = parser.parse_args()
  doc1 = json.load(args.FILE1)
Example #9
def main():
    """Parses arguments and starts console."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file",
                        default=None,
                        type=argparse.FileType('r'),
                        help="The configuration file in JSON format")
    parser.add_argument("--poll",
                        action="store_true",
                        help="Disable console and start host controller "
                        "threads polling TFC.")
    parser.add_argument("--use-tfc",
                        action="store_true",
                        help="Enable TFC (TradeFed Cluster).")
    parser.add_argument("--vti",
                        default=None,
                        help="The base address of VTI endpoint APIs")
    parser.add_argument("--script",
                        default=None,
                        help="The path to a script file in .py format")
    parser.add_argument("--serial",
                        default=None,
                        help="The default serial numbers for flashing and "
                        "testing in the console. Multiple serial numbers "
                        "are separated by comma.")
    parser.add_argument("--loop",
                        default=None,
                        metavar="INTERVAL",
                        type=_ParseInterval,
                        help="The interval of repeating the script. "
                        "The format is a float followed by unit which is "
                        "one of 'm' (minute), 'h' (hour), and 'd' (day). "
                        "If this option is unspecified, the script will "
                        "be processed once.")
    parser.add_argument("--console",
                        action="store_true",
                        help="Whether to start a console after processing "
                        "a script.")
    args = parser.parse_args()
    if args.config_file:
        config_json = json.load(args.config_file)
    else:
        config_json = {}
        config_json["log_level"] = "DEBUG"
        config_json["hosts"] = []
        host_config = {}
        host_config["cluster_ids"] = ["local-cluster-1", "local-cluster-2"]
        host_config["lease_interval_sec"] = 30
        config_json["hosts"].append(host_config)

    env_vars = env_utils.SaveAndClearEnvVars([_ANDROID_BUILD_TOP])

    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, config_json["log_level"]))

    if args.vti:
        vti_endpoint = vti_endpoint_client.VtiEndpointClient(args.vti)
    else:
        vti_endpoint = None

    tfc = None
    if args.use_tfc:
        if args.config_file:
            tfc = tfc_client.CreateTfcClient(
                config_json["tfc_api_root"],
                config_json["service_key_json_path"],
                api_name=config_json["tfc_api_name"],
                api_version=config_json["tfc_api_version"],
                scopes=config_json["tfc_scopes"])
        else:
            print("WARN: If --use_tfc is set, --config_file argument value "
                  "must be provided. Starting without TFC.")

    pab = build_provider_pab.BuildProviderPAB()

    hosts = []
    for host_config in config_json["hosts"]:
        cluster_ids = host_config["cluster_ids"]
        # If host name is not specified, use local host.
        hostname = host_config.get("hostname", socket.gethostname())
        port = host_config.get("port", remote_client.DEFAULT_PORT)
        remote = remote_client.RemoteClient(hostname, port)
        host = tfc_host_controller.HostController(remote, tfc, hostname,
                                                  cluster_ids)
        hosts.append(host)
        if args.poll:
            lease_interval_sec = host_config["lease_interval_sec"]
            host_thread = threading.Thread(target=host.Run,
                                           args=(lease_interval_sec, ))
            host_thread.daemon = True
            host_thread.start()

    if args.poll:
        while True:
            sys.stdin.readline()
    else:
        main_console = console.Console(vti_endpoint,
                                       tfc,
                                       pab,
                                       hosts,
                                       vti_address=args.vti)
        main_console.StartJobThreadAndProcessPool()
        try:
            if args.serial:
                main_console.SetSerials(args.serial.split(","))
            if args.script:
                if args.loop is None:
                    main_console.ProcessScript(args.script)
                else:
                    _ScriptLoop(main_console, args.script, args.loop)

                if args.console:
                    main_console.cmdloop()
            else:  # if not script, the default is console mode.
                main_console.cmdloop()
        finally:
            main_console.TearDown()

    env_utils.RestoreEnvVars(env_vars)
Example #10
File: wwz.py Project: yinzhq/WWZ
    The above command will use the settings in args.txt but will
    use c=0.0125 instead of c=0.001

    Comments and blank lines are NOT allowed in argument files.

    Import this script via Python to use it as a module, rather than
    a standalone script. (import wwz)

    """

    parser = argparse.ArgumentParser(prog='wwz.py', \
                 formatter_class=argparse.RawDescriptionHelpFormatter,\
                 fromfile_prefix_chars="@", description=description)

    parser.add_argument("-f", "--file", type=argparse.FileType("r"),\
                        default=sys.stdin, required=True,\
                        help="the Input File, Raw Lightcurve")
    parser.add_argument("-o", "--output", type=argparse.FileType('w'),\
                        default=sys.stdout, required=True,\
                        help="the Output File Name")
    parser.add_argument("-l", "--freq-low", type=float, required=True,\
                        help="the Low Frequency Value")
    parser.add_argument("-hi", "--freq-high", type=float, required=True,\
                        help="the High Frequency Value")
    parser.add_argument("-d", "--freq-step", type=float, required=True,\
                        help="the dF value, incremental step for Frequency")
    parser.add_argument("-c", "--dcon", type=float, required=True,\
                        help="the C constant for the Window Function")
    parser.add_argument("-g", "--gnuplot-compatible", action="store_true",\
                        default=False, help="the Output file is GNUPlot \
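The docstring above describes wwz.py's argument-file support; a minimal sketch
of how fromfile_prefix_chars="@" behaves (file name and values are illustrative):

import argparse

parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("-c", "--dcon", type=float)
with open("args.txt", "w") as fh:
    fh.write("-c\n0.001\n")  # one argument per line, no comments or blank lines
print(parser.parse_args(["@args.txt"]).dcon)                   # 0.001
print(parser.parse_args(["@args.txt", "-c", "0.0125"]).dcon)   # 0.0125, later value wins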
Example #11
def create_parser():
    """Returns argument parser """

    parser = argparse.ArgumentParser(
        description="Extract structured data from PDF files and save to CSV or JSON."
    )

    parser.add_argument(
        "--input-reader",
        choices=input_mapping.keys(),
        default="pdftotext",
        help="Choose text extraction function. Default: pdftotext",
    )

    parser.add_argument(
        "--output-format",
        choices=output_mapping.keys(),
        default="none",
        help="Choose output format. Default: none",
    )

    parser.add_argument(
        "--output-date-format",
        dest="output_date_format",
        default="%Y-%m-%d",
        help="Choose output date format. Default: %%Y-%%m-%%d (ISO 8601 Date)",
    )

    parser.add_argument(
        "--output-name",
        "-o",
        dest="output_name",
        default="invoices-output",
        help="Custom name for output file. Extension is added based on chosen format.",
    )

    parser.add_argument(
        "--debug", dest="debug", action="store_true", help="Enable debug information."
    )

    parser.add_argument(
        "--copy",
        "-c",
        dest="copy",
        help="Copy and rename processed PDFs to specified folder.",
    )

    parser.add_argument(
        "--move",
        "-m",
        dest="move",
        help="Move and rename processed PDFs to specified folder.",
    )

    parser.add_argument(
        "--filename-format",
        dest="filename",
        default="{date} {invoice_number} {desc}.pdf",
        help="Filename format to use when moving or copying processed PDFs."
        'Default: "{date} {invoice_number} {desc}.pdf"',
    )

    parser.add_argument(
        "--template-folder",
        "-t",
        dest="template_folder",
        help="Folder containing invoice templates in yml file. Always adds built-in templates.",
    )

    parser.add_argument(
        "--exclude-built-in-templates",
        dest="exclude_built_in_templates",
        default=False,
        help="Ignore built-in templates.",
        action="store_true",
    )

    parser.add_argument(
        "input_files",
        type=argparse.FileType("r"),
        nargs="+",
        help="File or directory to analyze.",
    )

    return parser
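A side note on the --output-date-format help text in Example #11: argparse expands
help strings with %-style specifiers such as %(default)s, which is why a literal
percent sign has to be written as %%. A tiny sketch:

import argparse

p = argparse.ArgumentParser()
p.add_argument("--output-date-format", default="%Y-%m-%d",
               help="Choose output date format. Default: %(default)s")
print(p.format_help())  # the option's help line renders the default as %Y-%m-%d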
Example #12
def create_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="learn BPE-based word segmentation")

    parser.add_argument('--input',
                        '-i',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        metavar='PATH',
                        help="Input file (default: standard input).")
    parser.add_argument('--codes',
                        '-c',
                        type=argparse.FileType('r'),
                        metavar='PATH',
                        required=True,
                        help="File with BPE codes (created by learn_bpe.py).")
    parser.add_argument(
        '--merges',
        '-m',
        type=int,
        default=-1,
        metavar='INT',
        help="Use this many BPE operations (<= number of learned symbols)" +
        "default: Apply all the learned merge operations")
    parser.add_argument('--output',
                        '-o',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        metavar='PATH',
                        help="Output file (default: standard output)")
    parser.add_argument(
        '--separator',
        '-s',
        type=str,
        default='@@',
        metavar='STR',
        help=
        "Separator between non-final subword units (default: '%(default)s'))")
    parser.add_argument(
        '--vocabulary',
        type=argparse.FileType('r'),
        default=None,
        metavar="PATH",
        help=
        "Vocabulary file (built with get_vocab.py). If provided, this script reverts any merge operations that produce an OOV."
    )
    parser.add_argument(
        '--vocabulary-threshold',
        type=int,
        default=None,
        metavar="INT",
        help=
        "Vocabulary threshold. If vocabulary is provided, any word with frequency < threshold will be treated as OOV"
    )
    parser.add_argument(
        '--glossaries',
        type=str,
        nargs='+',
        default=None,
        metavar="STR",
        help=
        "Glossaries. The strings provided in glossaries will not be affected" +
        "by the BPE (i.e. they will neither be broken into subwords, nor concatenated with other subwords"
    )

    return parser
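Example #12 points --input/--output at sys.stdin/sys.stdout so the tool can run
as a shell filter; a cut-down sketch of the same pattern (the uppercase transform
stands in for the real BPE segmentation):

import argparse
import sys

p = argparse.ArgumentParser()
p.add_argument('--input', '-i', type=argparse.FileType('r'), default=sys.stdin)
p.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout)
args = p.parse_args([])  # no paths given: non-string defaults are used as-is
for line in args.input:
    args.output.write(line.upper())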
Example #13
def main():

    parser = argparse.ArgumentParser(description="Process alignment files.")

    parser.add_argument(
        "-i",
        "--input",
        type=argparse.FileType("r"),
        help="Merged (interlaced) alignment file",
        required=True,
    )

    parser.add_argument("-f",
                        "--reference",
                        help="Reference fasta file",
                        nargs="+")

    parser.add_argument("-o",
                        "--output",
                        help="Output directory",
                        required=True)

    parser.add_argument(
        "-q",
        "--map-quality",
        type=int,
        help="Minimum mapping quality threshold",
        default=DEFAULT_PARAMETERS["mapq_threshold"],
    )

    parser.add_argument(
        "-c",
        "--chunk-size",
        type=int,
        help="Standard chunk size for nodes in the network",
        default=DEFAULT_PARAMETERS["chunk_size"],
    )

    parser.add_argument(
        "-s",
        "--size-chunk-threshold",
        type=int,
        help="Minimum size for tail ends to be integrated"
        " as chunks in the network",
        default=DEFAULT_PARAMETERS["size_chunk_threshold"],
    )

    parser.add_argument(
        "-a",
        "--self-contacts",
        action="store_true",
        help="Do not discard self contacts",
        default=DEFAULT_PARAMETERS["self_contacts"],
    )

    parser.add_argument(
        "-n",
        "--normalize",
        action="store_true",
        help="Normalize contacts by the geometric mean"
        " of both coverages of the chunks",
        default=DEFAULT_PARAMETERS["normalized"],
    )

    parser.add_argument(
        "-r",
        "--read-size",
        help="Read size",
        default=DEFAULT_PARAMETERS["read_size"],
    )

    parser.add_argument(
        "-F",
        "--fastq",
        help="Reconstruct FASTQ from "
        "reference and interleaved alignment file",
        nargs=2,
    )

    parser.add_argument(
        "-D",
        "--fastq-contig",
        help="Reconstruct FASTQ from "
        "contig data and interleaved alignment file",
        nargs=1,
    )

    parser.add_argument(
        "-m",
        "--mem",
        help="Save memory by only writing "
        " read names in FASTQ mode",
        action="store_true",
    )

    args = parser.parse_args()

    merged_file = args.input
    reference_file = args.reference
    output_dir = args.output
    fastq_files = args.fastq
    contig_data = args.fastq_contig
    save_memory = args.mem

    parameters = copy.deepcopy(DEFAULT_PARAMETERS)

    parameters["mapq_threshold"] = args.map_quality
    parameters["chunk_size"] = args.chunk_size
    parameters["read_size"] = args.read_size
    parameters["size_chunk_threshold"] = args.size_chunk_threshold
    parameters["self_contacts"] = args.self_contacts
    parameters["normalized"] = args.normalize

    if fastq_files:

        fastq_forward, fastq_reverse = fastq_files
        read_names = alignment_to_reads(merged_file, output_dir, parameters,
                                        save_memory, *reference_file)

        if save_memory:
            retrieve_reads_from_fastq(fastq_forward, fastq_reverse, read_names,
                                      output_dir)

    elif contig_data:
        retrieve_reads_contig_wise(merged_file, contig_data, output_dir)

    else:

        my_assembly, _ = reference_file

        alignment_to_contacts(
            sam_merged=merged_file,
            assembly=my_assembly,
            output_dir=output_dir,
            parameters=parameters,
        )
Example #14
    def add_arguments(self, parser):
        parser.add_argument("-f", type=argparse.FileType(), required=True)
Example #15
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input file')

    parser.add_argument('-C',
                        '--currency',
                        action='store',
                        help=("Override the default output currency "
                              "(default is first operating currency)"))

    parser.add_argument('-n', '--dry-run', action='store_true')

    for shortname, longname in [('-c', 'commodities'), ('-a', 'accounts'),
                                ('-p', 'prices'), ('-r', 'rates'),
                                ('-m', 'postings')]:
        parser.add_argument(
            shortname,
            '--output_{}'.format(longname),
            type=argparse.FileType('w'),
            help="CSV filename to write out the {} table to.".format(longname))

    parser.add_argument(
        '-o',
        '--output',
        type=argparse.FileType('w'),
        help="CSV filename to write out the final joined table to.")

    args = parser.parse_args()

    # Load the file contents.
    entries, errors, options_map = loader.load_file(args.filename)
    validate_entries(entries)

    # Initialize main output currency.
    main_currency = args.currency or options_map['operating_currency'][0]
    logging.info("Operating currency: %s", main_currency)

    # Get the map of commodities to their meta tags.
    commodities_table = get_commodities_table(
        entries, ['export', 'assetcls', 'strategy', 'issuer'])
    if args.output_commodities is not None:
        write_table(commodities_table, args.output_commodities)

    # Get the map of accounts to their meta tags.
    accounts_table, accounts_map = get_accounts_table(entries,
                                                      ['tax', 'liquid'])
    if args.output_accounts is not None:
        write_table(accounts_table, args.output_accounts)

    # Enumerate the list of assets.
    postings_table = get_postings_table(entries, options_map, accounts_map)
    if args.output_postings is not None:
        write_table(postings_table, args.output_postings)

    # Get the list of prices.
    prices_table = get_prices_table(entries, main_currency)
    if args.output_prices is not None:
        write_table(prices_table, args.output_prices)

    # Get the list of exchange rates.
    index = postings_table.header.index('cost_currency')
    currencies = set(row[index] for row in postings_table.rows)
    rates_table = get_rates_table(entries, currencies, main_currency)
    if args.output_rates is not None:
        write_table(rates_table, args.output_rates)

    # Join all the tables.
    joined_table = join(postings_table, (('currency', ), commodities_table),
                        (('account', ), accounts_table),
                        (('currency', 'cost_currency'), prices_table),
                        (('cost_currency', ), rates_table))

    # Reorder columns.
    # We do this in order to avoid having to change the spreadsheet when we add new columns.
    headers = list(joined_table.header)
    headers.remove('issuer')
    headers.append('issuer')
    final_table = reorder_columns(joined_table, headers)

    # Filter table.
    rows = [row for row in final_table.rows if row[7].lower() != 'ignore']
    table = Table(final_table.header, rows)

    if args.output is not None:
        table[0][0] += ' ({:%Y-%m-%d %H:%M})'.format(datetime.datetime.now())
        write_table(table, args.output)

    return 0
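Example #15 builds its --output_* options in a loop; a reduced sketch of that
pattern and of how the handles come back on the namespace (filenames are
placeholders):

import argparse

p = argparse.ArgumentParser()
for shortname, longname in [('-c', 'commodities'), ('-a', 'accounts')]:
    p.add_argument(shortname,
                   '--output_{}'.format(longname),
                   type=argparse.FileType('w'))
args = p.parse_args(['-c', 'commodities.csv'])
print(args.output_commodities)  # writable handle for commodities.csv
print(args.output_accounts)     # None -- option not supplied, no file created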
Example #16
def parse_args(argv, out, err):
    parser = argparse.ArgumentParser(
        prog='python -m camkes.runner',
        description='instantiate templates based on a CAmkES specification')
    parser.add_argument('--quiet',
                        '-q',
                        help='No diagnostics.',
                        dest='verbosity',
                        default=1,
                        action='store_const',
                        const=0)
    parser.add_argument('--verbose',
                        '-v',
                        help='Verbose diagnostics.',
                        dest='verbosity',
                        action='store_const',
                        const=2)
    parser.add_argument('--debug',
                        '-D',
                        help='Extra verbose diagnostics.',
                        dest='verbosity',
                        action='store_const',
                        const=3)
    parser.add_argument('--outfile',
                        '-O',
                        help='Output to the given file.',
                        type=argparse.FileType('w'),
                        required=True,
                        action='append',
                        default=[])
    parser.add_argument(
        '--verification-base-name',
        type=str,
        help='Prefix to use when generating Isabelle theory files.')
    parser.add_argument('--item',
                        '-T',
                        help='AST entity to produce code for.',
                        required=True,
                        action='append',
                        default=[])
    parser.add_argument('--template',
                        help='template to use to produce code.',
                        required=True,
                        action='append',
                        default=[])
    parser.add_argument('--templates',
                        '-t',
                        help='Extra directories to '
                        'search for templates (before builtin templates).',
                        action='append',
                        default=[])
    parser.add_argument('--frpc-lock-elision',
                        action='store_true',
                        default=True,
                        help='Enable lock elision optimisation in seL4RPC '
                        'connector.')
    parser.add_argument('--fno-rpc-lock-elision',
                        action='store_false',
                        dest='frpc_lock_elision',
                        help='Disable lock elision optimisation in '
                        'seL4RPC connector.')
    parser.add_argument(
        '--fprovide-tcb-caps',
        action='store_true',
        default=True,
        help='Hand out TCB caps to components, allowing them to '
        'exit cleanly.')
    parser.add_argument('--fno-provide-tcb-caps',
                        action='store_false',
                        dest='fprovide_tcb_caps',
                        help='Do not hand out TCB caps, causing '
                        'components to fault on exiting.')
    parser.add_argument('--default-priority',
                        type=int,
                        default=254,
                        help='Default component thread priority.')
    parser.add_argument('--default-max-priority',
                        type=int,
                        default=254,
                        help='Default component thread maximum priority.')
    parser.add_argument('--default-affinity',
                        type=int,
                        default=0,
                        help='Default component thread affinity.')
    parser.add_argument(
        '--default-period',
        type=int,
        default=10000,
        help='Default component thread scheduling context period.')
    parser.add_argument(
        '--default-budget',
        type=int,
        default=10000,
        help='Default component thread scheduling context budget.')
    parser.add_argument(
        '--default-data',
        type=int,
        default=0,
        help='Default component thread scheduling context data.')
    parser.add_argument('--default-size_bits',
                        type=int,
                        default=8,
                        help='Default scheduling context size bits.')
    parser.add_argument('--default-stack-size',
                        type=int,
                        default=16384,
                        help='Default stack size of each thread.')
    parser.add_argument(
        '--largeframe',
        action='store_true',
        help='Use large frames (for non-DMA pools) when possible.')
    parser.add_argument('--architecture',
                        '--arch',
                        default='aarch32',
                        type=lambda x: type('')(x).lower(),
                        choices=('aarch32', 'arm_hyp', 'ia32', 'x86_64',
                                 'aarch64', 'riscv32', 'riscv64'),
                        help='Target architecture.')
    parser.add_argument('--makefile-dependencies',
                        '-MD',
                        type=argparse.FileType('w'),
                        help='Write Makefile dependency rule to '
                        'FILE')
    parser.add_argument(
        '--debug-fault-handlers',
        action='store_true',
        help='Provide fault handlers to decode cap and VM faults for '
        'debugging purposes.')
    parser.add_argument('--largeframe-dma',
                        action='store_true',
                        help='Use large frames for DMA pools when possible.')
    parser.add_argument('--realtime',
                        action='store_true',
                        help='Target realtime seL4.')
    parser.add_argument(
        '--object-sizes',
        type=argparse.FileType('r'),
        help='YAML file specifying the object sizes for any seL4 objects '
        'used in this invocation of the runner.')

    object_state_group = parser.add_mutually_exclusive_group()
    object_state_group.add_argument(
        '--load-object-state',
        type=argparse.FileType('rb'),
        help='Load previously-generated cap and object state.')
    object_state_group.add_argument(
        '--save-object-state',
        type=argparse.FileType('wb'),
        help='Save generated cap and object state to this file.')

    # To get the AST, there should be either a pickled AST or a file to parse
    parser.add_argument('--load-ast',
                        type=argparse.FileType('rb'),
                        help='Load specification AST from this file.',
                        required=True)

    # Juggle the standard streams either side of parsing command-line arguments
    # because argparse provides no mechanism to control this.
    old_out = sys.stdout
    old_err = sys.stderr
    sys.stdout = out
    sys.stderr = err
    options = parser.parse_args(argv[1:])

    sys.stdout = old_out
    sys.stderr = old_err

    # Check that verification_base_name would be a valid identifier before
    # our templates try to use it
    if options.verification_base_name is not None:
        if not re.match(r'[a-zA-Z][a-zA-Z0-9_]*$',
                        options.verification_base_name):
            parser.error(
                'Not a valid identifier for --verification-base-name: %r' %
                options.verification_base_name)

    return options
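The --load-object-state/--save-object-state pair above lives in a mutually
exclusive group; a minimal sketch of how argparse enforces that:

import argparse

p = argparse.ArgumentParser()
g = p.add_mutually_exclusive_group()
g.add_argument('--load-object-state', type=argparse.FileType('rb'))
g.add_argument('--save-object-state', type=argparse.FileType('wb'))
print(p.parse_args([]))  # fine: the group is optional, both default to None
# Passing both options in one invocation makes parse_args() exit with
# "argument --save-object-state: not allowed with argument --load-object-state".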
Example #17
def _main():
    # Our binary requirements go here
    req = reqs.Req()
    req.require('git')
    req.coccinelle('1.0.7')
    if not req.reqs_match():
        sys.exit(1)

    # set up and parse arguments
    parser = argparse.ArgumentParser(description='generate backport tree')
    parser.add_argument('kerneldir', metavar='<kernel tree>', type=str,
                        help='Kernel tree to copy drivers from')
    parser.add_argument('outdir', metavar='<output directory>', type=str,
                        help='Directory to write the generated tree to')
    parser.add_argument('--copy-list', metavar='<listfile>', type=argparse.FileType('r'),
                        default='copy-list',
                        help='File containing list of files/directories to copy, default "copy-list"')
    parser.add_argument('--git-revision', metavar='<revision>', type=str,
                        help='git commit revision (see gitrevisions(7)) to take objects from. ' +
                             'If this is specified, the kernel tree is used as git object storage ' +
                             'and we use git ls-tree to get the files.')
    parser.add_argument('--clean', const=True, default=False, action="store_const",
                        help='Clean output directory instead of erroring if it isn\'t empty')
    parser.add_argument('--list-files', const=True, default=False, action="store_const",
                        help='Only list files to copy')
    parser.add_argument('--integrate', const=True, default=False, action="store_const",
                        help='Integrate a future backported kernel solution into ' +
                             'an older kernel tree source directory.')
    parser.add_argument('--refresh', const=True, default=False, action="store_const",
                        help='Refresh patches as they are applied, the source dir will be modified!')
    parser.add_argument('--base-name', metavar='<name>', type=str, default='Linux',
                        help='name of base tree, default just "Linux"')
    parser.add_argument('--gitdebug', '--git-debug', const=True, default=False, action="store_const",
                        help='Use git, in the output tree, to debug the various transformation steps ' +
                             'that the tree generation makes (apply patches, ...)')
    parser.add_argument('--verbose', const=True, default=False, action="store_const",
                        help='Print more verbose information')
    parser.add_argument('--extra-driver', nargs=2, metavar=('<source dir>', '<copy-list>'), type=str,
                        action='append', default=[], help='Extra driver directory/copy-list.')
    parser.add_argument('--kup', const=True, default=False, action="store_const",
                        help='For maintainers: upload a release to kernel.org')
    parser.add_argument('--kup-test', const=True, default=False, action="store_const",
                        help='For maintainers: do all the work as if you were about to ' +
                             'upload to kernel.org but do not do the final `kup put` ' +
                             'and also do not run any `kup mkdir` commands. This will ' +
                             'however run `kup ls` on the target paths so ' +
                             'at the very least we test your kup configuration. ' +
                             'If this is your first time uploading use this first!')
    parser.add_argument('--test-cocci', metavar='<sp_file>', type=str, default=None,
                        help='Only use the cocci file passed for Coccinelle, don\'t do anything else, ' +
                             'also creates a git repo on the target directory for easy inspection ' +
                             'of changes done by Coccinelle.')
    parser.add_argument('--profile-cocci', metavar='<sp_file>', type=str, default=None,
                        help='Only use the cocci file passed and pass --profile to Coccinelle, ' +
                             'also creates a git repo on the target directory for easy inspection ' +
                             'of changes done by Coccinelle.')
    args = parser.parse_args()

    # When building a package we use CPTCFG, as we can rely on the
    # fact that kconfig treats CONFIG_ as an environment variable,
    # requiring fewer changes to the code. For kernel integration we use
    # the longer CONFIG_BACKPORT given that we'll be sticking to
    # the kernel symbol namespace; to address that we do a final
    # search / replace. Technically it's possible to rely on the
    # same prefix for packaging as with kernel integration, but
    # there are already some users of the CPTCFG prefix.
    bpid = None
    if args.integrate:
        bpid = Bp_Identity(integrate = args.integrate,
                           kconfig_prefix = 'CONFIG_',
                           project_prefix = 'BACKPORT_',
                           project_dir = args.outdir,
                           target_dir = os.path.join(args.outdir, 'backports/'),
                           target_dir_name = 'backports/',
                           kconfig_source_var = '$BACKPORT_DIR',
                           )
    else:
        bpid = Bp_Identity(integrate = args.integrate,
                           kconfig_prefix = 'CPTCFG_',
                           project_prefix = '',
                           project_dir = args.outdir,
                           target_dir = args.outdir,
                           target_dir_name = '',
                           kconfig_source_var = '$BACKPORT_DIR',
                           )

    def logwrite(msg):
        sys.stdout.write(msg)
        sys.stdout.write('\n')
        sys.stdout.flush()

    retv = process(args.kerneldir, args.copy_list,
                   git_revision=args.git_revision,
                   bpid=bpid,
                   clean=args.clean,
                   refresh=args.refresh, base_name=args.base_name,
                   gitdebug=args.gitdebug, verbose=args.verbose,
                   extra_driver=args.extra_driver,
                   kup=args.kup,
                   kup_test=args.kup_test,
                   test_cocci=args.test_cocci,
                   profile_cocci=args.profile_cocci,
                   logwrite=logwrite,
                   list_files=args.list_files)
    if args.list_files:
        print('\n'.join(retv))
    else:
        return retv
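The boolean flags in Example #17 use action="store_const" with const=True and
default=False, which behaves like the more common action="store_true"; a short sketch:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--clean', const=True, default=False, action='store_const')
print(p.parse_args([]).clean)           # False
print(p.parse_args(['--clean']).clean)  # True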
Example #18
#
# Cameo Image header generator, used by some D-Link DGS-1210 switches
# and APRESIA ApresiaLightGS series
#
import argparse
import pathlib
import socket
import struct

MODEL_LEN = 20
SIGNATURE_LEN = 16
LINUXLOAD_LEN = 10
BUFSIZE = 4096

parser = argparse.ArgumentParser(description='Generate Cameo firmware header.')
parser.add_argument('source_file', type=argparse.FileType('rb'))
parser.add_argument('dest_file', type=argparse.FileType('wb'))
parser.add_argument('model')
parser.add_argument('signature')
parser.add_argument('partition',
                    type=int,
                    choices=range(0, 10),
                    metavar="partition=[0-9]",
                    help="partition id")
parser.add_argument('customer_signature',
                    type=int,
                    choices=range(0, 10),
                    metavar="customer_signature=[0-9]",
                    help="customer signature")
parser.add_argument('board_version',
                    type=int,
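
This listing is cut off before the header is actually written, so the real field layout is not visible here. Purely as a hedged illustration of how the constants above could be combined into a fixed-width header with struct, a sketch follows; the field order, endianness, and field set are guesses, not the actual Cameo format.

import struct

def pack_header_sketch(model, signature, partition, image_size):
    """Hypothetical fixed-width header; the real Cameo layout may differ."""
    return struct.pack(
        '>{}s{}sBI'.format(MODEL_LEN, SIGNATURE_LEN),  # big-endian, null-padded strings
        model.encode('ascii')[:MODEL_LEN],
        signature.encode('ascii')[:SIGNATURE_LEN],
        partition,    # single-byte partition id
        image_size,   # 32-bit payload length
    )
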
Ejemplo n.º 19
0
            if isinstance(inDict[key], dict):
                print_dict_keys(inDict[key], recurseLevel - 1, indentLevel + 1,
                                showAll)
    print("")


#### End of function print_dict_keys

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Utility to show the keys in" +
        " a JSON file up to a given depth")
    # Add a file to read input from
    parser.add_argument("infile",
                        help="JSON file to read information from",
                        type=argparse.FileType('r'))

    parser.add_argument("--level",
                        help="Maximum level to iterate to",
                        nargs="?",
                        type=int,
                        default=1)
    parser.add_argument("--all",
                        help="Flags whether to show all keys in the JSON file",
                        action='store_true',
                        default=False)

    # Parse the arguments
    args = parser.parse_args()

    # Read in the JSON as a dictionary
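
The listing stops here, so the actual read and display calls are not shown. A minimal sketch of how the parsed arguments would typically be fed into print_dict_keys follows; the call signature is inferred from the recursive call at the top of this example and should be treated as an assumption.

    import json

    # Read the JSON into a dictionary and walk its keys down to the requested depth.
    data = json.load(args.infile)
    print_dict_keys(data, args.level, 0, args.all)
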
Ejemplo n.º 20
0
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--file',
                        type=argparse.FileType('r'),
                        help='input video file',
                        required=True)
    parser.add_argument('--frames_limit',
                        default=200000,
                        type=int,
                        help='limit of frames to process > 2')
    parser.add_argument('--output_html',
                        default='report.html',
                        type=argparse.FileType('w'),
                        help='path to output .html file with report')
    parser.add_argument(
        '--output_video',
        default=None,
        type=argparse.FileType('w'),
        help='path to output .avi file with visualisation of bounding boxes')
    parser.add_argument(
        '--output_csv',
        default='recognized.csv',
        type=argparse.FileType('w'),
        help='path to output table with information about all detected faces')
    parser.add_argument('--caffe_models_path',
                        default='/root/caffe/models',
                        type=str,
                        help='path to directory with pre-trained caffe models')
    parser.add_argument('--gpu',
                        action='store_true',
                        help='switch for gpu computation')

    args = parser.parse_args()

    if args.frames_limit < 3:
        raise argparse.ArgumentTypeError('minimum frames_limit is 3')

    caffe_models_path = os.environ.get(
        'CAFFE_MODELS_PATH') or args.caffe_models_path

    if args.gpu:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    # create temporary directory in the current directory where cropped faces will be stored
    with temporary_directory() as tmp_dir:

        print('Extracting people')
        extracted_faces = extract_people.extract_faces(
            args.file.name,
            frames_limit=args.frames_limit,
            tmp_dir=tmp_dir,
            detection_step=DETECTION_STEP,
            caffe_models_path=caffe_models_path,
        )

        print('Extracting statistics')
        recognized_faces_df = recognize_people.recognize_faces(
            detected_faces=extracted_faces,
            tmp_dir=tmp_dir,
            frames_limit=args.frames_limit,
            caffe_models_path=caffe_models_path,
            recognition_step=RECOGNITION_STEP,
        )

        print('Generating html')
        gen_html(args.output_html, recognized_faces_df)

        if args.output_csv is not None:
            recognized_faces_df.to_csv(args.output_csv)

        if args.output_video is not None:
            print('Visualizing')
            visualize(
                people_df=recognized_faces_df,
                input_videofile=args.file.name,
                output_videofile=args.output_video.name,
                frames_limit=args.frames_limit,
            )
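
The temporary_directory() helper used above is not part of this listing. A minimal sketch of such a context manager, assuming it simply creates a scratch directory in the current working directory (as the comment says) and removes it when the block exits:

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def temporary_directory():
    """Create a scratch directory in the current directory and clean it up afterwards."""
    tmp_dir = tempfile.mkdtemp(dir=os.getcwd())
    try:
        yield tmp_dir
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)
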
Ejemplo n.º 21
0
        elif a1[i] is not None and a2[i] is None:
            cr.set_source_rgba(1.0, 0.0, 0.0, 1.0)
            a1_x, a1_y = mm.rev_geocode((a1[i].longitude, a1[i].latitude))
            cr.arc(a1_x, a1_y, p_radius, 0, 2 * math.pi)
            cr.fill()
        elif a1[i] is None and a2[i] is not None:
            cr.set_source_rgba(1.0, 0.5, 0.0, 1.0)
            a2_x, a2_y = mm.rev_geocode((a2[i].longitude, a2[i].latitude))
            cr.arc(a2_x, a2_y, p_radius, 0, 2 * math.pi)
            cr.fill()
    return surface


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('gpx_file1', type=argparse.FileType('r'))
    parser.add_argument('gpx_file2', type=argparse.FileType('r'))
    parser.add_argument('-c',
                        '--cutoff',
                        type=int,
                        default=10,
                        help="cutoff distance in meters for similar points")
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-e',
                        '--even',
                        type=int,
                        help="evenly distribute points in meters")
    parser.add_argument('-o',
                        '--output-file',
                        default="alignment.png",
                        help="output filename")
Ejemplo n.º 22
0
def main():
    envs = {
        'COMPOSE_FILE': 'docker-compose.yml',
        'COMPOSE_HTTP_TIMEOUT': '60',
        'COMPOSE_TLS_VERSION': 'TLSv1'
    }
    env_path = os.path.join(os.getcwd(), '.env')

    if os.path.isfile(env_path):
        with open(env_path) as env_file:
            envs.update(
                dict(
                    map(lambda line: line.strip().split('=', 1),
                        (line for line in env_file
                         if not line.startswith('#') and line.strip()))))

    # Export defaults that are not already in the environment; a plain loop is
    # used because map() is lazy in Python 3 and its side effects never run.
    for key, value in envs.items():
        if key not in os.environ:
            os.environ[key] = value

    parser = argparse.ArgumentParser(
        formatter_class=lambda prog: argparse.HelpFormatter(
            prog, max_help_position=50, width=120))
    parser.add_argument(
        '-f',
        '--file',
        type=argparse.FileType(),
        help='Specify an alternate compose file (default: docker-compose.yml)',
        default=[],
        action='append')
    parser.add_argument(
        '-p',
        '--project-name',
        help='Specify an alternate project name (default: directory name)',
        default=os.environ.get('COMPOSE_PROJECT_NAME'))
    parser.add_argument('--dry-run', action='store_true')
    subparsers = parser.add_subparsers(title='Command')
    parser.add_argument('_service',
                        metavar='service',
                        nargs='*',
                        help='List of services to run the command for')

    services_parser = argparse.ArgumentParser(add_help=False)
    services_parser.add_argument(
        'service', nargs='*', help='List of services to run the command for')

    pull_parser = subparsers.add_parser('pull',
                                        help='Pull service images',
                                        add_help=False,
                                        parents=[services_parser])
    pull_parser.set_defaults(command='pull')

    rm_parser = subparsers.add_parser('rm',
                                      help='Stop and remove services',
                                      add_help=False,
                                      parents=[services_parser])
    rm_parser.set_defaults(command='rm')
    rm_parser.add_argument('-f',
                           help='docker-compose compatibility; ignored',
                           action='store_true')

    start_parser = subparsers.add_parser('start',
                                         help='Start services',
                                         add_help=False,
                                         parents=[services_parser])
    start_parser.set_defaults(command='start')

    stop_parser = subparsers.add_parser('stop',
                                        help='Stop services',
                                        add_help=False,
                                        parents=[services_parser])
    stop_parser.set_defaults(command='stop')

    up_parser = subparsers.add_parser('up',
                                      help='Create and start services',
                                      add_help=False,
                                      parents=[services_parser])
    up_parser.set_defaults(command='up')
    up_parser.add_argument('-d',
                           help='docker-compose compatibility; ignored',
                           action='store_true')

    convert_parser = subparsers.add_parser(
        'convert',
        help='Convert services to Kubernetes format',
        add_help=False,
        parents=[services_parser])
    convert_parser.set_defaults(command='convert')

    args = parser.parse_args(sys.argv[1:])

    if len(args.file) == 0:
        try:
            args.file = [open(f)
                         for f in os.environ['COMPOSE_FILE'].split(':')]
        except IOError as e:
            print(e)
            parser.print_help()
            sys.exit(1)

    global debug
    debug = args.dry_run

    compose_base_dir = os.path.dirname(os.path.abspath(args.file[0].name))

    if args.project_name is None:
        args.project_name = os.path.basename(compose_base_dir)

    # Decode and merge the compose files
    compose_dicts = map(lambda f: yaml.load(f, yodl.OrderedDictYAMLLoader),
                        args.file)
    merged_compose = reduce(merge, compose_dicts)

    docker_compose = DockerCompose(merged_compose, args.project_name,
                                   compose_base_dir + '/', args.service)
    getattr(docker_compose, args.command)()
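
The merge function handed to reduce() above is not shown in this excerpt. A recursive dictionary merge along the following lines would produce the combined compose mapping, with values from later files overriding earlier ones; this is an assumption for illustration, not necessarily the project's own helper.

def merge(base, override):
    """Recursively merge two compose dictionaries; values from override win."""
    result = dict(base)
    for key, value in override.items():
        if isinstance(result.get(key), dict) and isinstance(value, dict):
            result[key] = merge(result[key], value)
        else:
            result[key] = value
    return result
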
Ejemplo n.º 23
0
def make_parser(prog: str = 'gensound'):
    parser = argparse.ArgumentParser(
        prog=prog,
        usage='{} COMMAND [options...]'.format(prog),
        description='Command line interface of PyGenSound.',
        epilog='\n'.join([
            'EXAMPLE: ',
            '  $ gensound sine 440 -d 0.1 | gensound fadeout -o nhk.wav',
            '  $ gensound silence -d 0.9 | gensound concat nhk.wav -',
            ('  $ gensound sine 880 -d 2.0 | gensound fadeout | gensound '
             + 'concat nhk.wav -'),
        ]),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(title='commands', metavar='COMMAND')

    sine = subparsers.add_parser('sine',
                                 help='Generate sine wave sound.',
                                 description='Generate sine wave sound.')
    _setup_gencommand(sine)
    sine.set_defaults(handler=command_sine)

    sawtooth = subparsers.add_parser(
        'sawtooth',
        help='Generate sawtooth wave sound.',
        description='Generate sawtooth wave sound.',
    )
    _setup_gencommand(sawtooth)
    sawtooth.set_defaults(handler=command_sawtooth)

    square = subparsers.add_parser(
        'square',
        help='Generate square wave sound.',
        description='Generate square wave sound.',
    )
    _setup_gencommand(square)
    square.set_defaults(handler=command_square)

    silence = subparsers.add_parser('silence',
                                    help='Generate silence.',
                                    description='Generate silence.')
    _setup_gencommand(silence, has_frequency=False, has_volume=False)
    silence.set_defaults(handler=command_silence)

    noise = subparsers.add_parser('noise',
                                  help='Generate white noise sound.',
                                  description='Generate white noise sound.')
    _setup_gencommand(noise, has_frequency=False)
    noise.set_defaults(handler=command_noise)

    overlay = subparsers.add_parser('overlay',
                                    help='Overlay some sounds.',
                                    description='Overlay some sounds.')
    _setup_joincommand(overlay, 'overlay')
    overlay.set_defaults(handler=command_overlay)

    concat = subparsers.add_parser('concat',
                                   help='Concatenate some sounds.',
                                   description='Concatenate some sounds.')
    _setup_joincommand(concat, 'concatenate')
    concat.set_defaults(handler=command_concat)

    fadeIn = subparsers.add_parser(
        'fadein',
        help='Apply fade-in effect to sound.',
        description='Apply fade-in effect to sound.',
    )
    _setup_duration_filtercommand(fadeIn)
    fadeIn.set_defaults(handler=command_fadein)

    fadeOut = subparsers.add_parser(
        'fadeout',
        help='Apply fade-out effect to sound.',
        description='Apply fade-out effect to sound.',
    )
    _setup_duration_filtercommand(fadeOut)
    fadeOut.set_defaults(handler=command_fadeout)

    highpass = subparsers.add_parser(
        'highpass',
        help='Apply high pass filter to sound.',
        description='Apply high pass filter to sound.',
    )
    _setup_freq_filtercommand(highpass)
    highpass.set_defaults(handler=command_highpass)

    lowpass = subparsers.add_parser(
        'lowpass',
        help='Apply low pass filter to sound.',
        description='Apply low pass filter to sound.',
    )
    _setup_freq_filtercommand(lowpass)
    lowpass.set_defaults(handler=command_lowpass)

    resample = subparsers.add_parser('resample',
                                     help='Resample sound.',
                                     description='Resample sound.')
    _setup_freq_filtercommand(resample)
    resample.set_defaults(handler=command_resample)

    speed = subparsers.add_parser('speed',
                                  help='Change speed of sound.',
                                  description='Change speed of sound.')
    speed.add_argument('rate',
                       type=float,
                       nargs=1,
                       help="Speed rate. Doesn't change speed if 1.0.")
    _setup_input(speed)
    _setup_output(speed)
    speed.set_defaults(handler=command_speed)

    duration = subparsers.add_parser('duration',
                                     help='Change duration of sound.',
                                     description='Change duration of sound.')
    duration.add_argument('duration',
                          type=float,
                          nargs=1,
                          help='New duration in seconds.')
    _setup_input(duration)
    _setup_output(duration)
    duration.set_defaults(handler=command_speed)

    stereo = subparsers.add_parser('stereo',
                                   help='Make stereo from sound(s).',
                                   description='Make stereo from sound(s).')
    stereo.add_argument('left',
                        type=argparse.FileType('rb'),
                        default=sys.stdin,
                        nargs='?',
                        help='File for the left channel. If right is not given, it is'
                             + ' used as the right channel too. Default is stdin.')
    stereo.add_argument('right',
                        type=argparse.FileType('rb'),
                        default=None,
                        nargs='?',
                        help='File for the right channel.')
    _setup_output(stereo)
    stereo.set_defaults(handler=command_stereo)

    monaural = subparsers.add_parser('monaural',
                                     help='Make monaural from multiple channel'
                                          + ' sound.',
                                     description='Make monaural from multiple'
                                                 + ' channel sound.')
    monaural.add_argument('file',
                          type=argparse.FileType('rb'),
                          default=sys.stdin,
                          nargs='?',
                          help='File for conversion to monaural.')
    _setup_output(monaural)
    monaural.set_defaults(handler=command_monaural)

    return parser
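
Every subcommand above registers its implementation through set_defaults(handler=...), so an entry point only has to parse the arguments and dispatch. A minimal sketch of such a main() is shown below; the handler call convention (handler(args)) is an assumption, since the command_* functions themselves are not part of this listing.

def main(argv=None):
    parser = make_parser()
    args = parser.parse_args(argv)
    if not hasattr(args, 'handler'):
        # No subcommand given: print the command list instead of failing.
        parser.print_help()
        return
    args.handler(args)
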
Ejemplo n.º 24
0
def main():

    # argparse
    parser = argparse.ArgumentParser(
        description=
        'check whether ARS firing time could affect the ribonucleotide incorporation'
    )
    parser.add_argument('ars',
                        type=argparse.FileType('r'),
                        help='Bed file for ars region with time')
    parser.add_argument('list',
                        type=argparse.FileType('r'),
                        help='List for bed file with genotype')
    parser.add_argument(
        '-csv',
        help='Start from a generated dataframe csv file, skip data reading')
    parser.add_argument('-bed',
                        default='.',
                        help='Folder of bed file, default=\'.\'')
    parser.add_argument('-l',
                        type=int,
                        default=15000,
                        help='Length of flank region, default=15000')
    parser.add_argument('-o', default='Output', help='Output file basename')
    parser.add_argument('--block_ribosomal',
                        action='store_false',
                        help='Do not block ribosomal DNA')
    parser.add_argument('--efficiency',
                        action='store_true',
                        help='Use efficiency instead of time')
    args = parser.parse_args()

    # read lib info
    libinfo = read_libinfo(args.list)
    libs = list(libinfo.keys())
    print('Libraries:' + ','.join(libs))

    # read data
    if not args.csv:
        # read ars
        ars = read_ars(args.ars)
        print('ARS information read!')
        # extend position
        windows = generate_windows(ars, args.l, args.block_ribosomal)
        # add data
        data = read_data(windows, libs, args.bed)
        df = generate_df(data, libinfo)
        df.to_csv(args.o + '_data.csv', index=False)
    else:
        df = pd.read_csv(args.csv)
    print('Data read!')

    # generate summary
    df_summary = generate_summary(df)
    genotypes = df_summary.Genotype.unique()
    genotypes_possible = ['Rrnh201', 'EMrnh201', 'rnh201', 'WT']
    genotypes_used = [x for x in genotypes_possible if x in genotypes]
    # plot
    draw_ratio_scatter(df_summary,
                       genotypes_used,
                       output=args.o + '_MLE_scatter.png',
                       use_efficiency=args.efficiency)
    draw_ratio_scatter(df_summary,
                       genotypes_used,
                       output=args.o + '_mean_scatter.png',
                       use_MLE_ratio=False,
                       use_efficiency=args.efficiency)

    print('Done!')
Ejemplo n.º 25
0
def main():
    p = argparse.ArgumentParser()
    p.add_argument('csv')
    p.add_argument('revindex')
    p.add_argument('siglist', nargs='+')
    p.add_argument('--lca', nargs='+', default=LCA_DBs)
    p.add_argument('-k', '--ksize', default=31, type=int)
    p.add_argument('-o', '--output', type=argparse.FileType('wt'),
                   help='output CSV to this file instead of stdout')
    #p.add_argument('-v', '--verbose', action='store_true')
    p.add_argument('-d', '--debug', action='store_true')
    args = p.parse_args()

    if args.debug:
        global _print_debug
        _print_debug = True

    ## load LCA databases
    lca_db_list = []
    for lca_filename in args.lca:
        print('loading LCA database from {}'.format(lca_filename),
              file=sys.stderr)
        lca_db = lca_json.LCA_Database(lca_filename)
        taxfoo, hashval_to_lca, _ = lca_db.get_database(args.ksize, SCALED)
        lca_db_list.append((taxfoo, hashval_to_lca))
    
    # reverse index names -> taxids
    names_to_taxids = defaultdict(set)
    for taxid, (name, _, _) in taxfoo.taxid_to_names.items():
        names_to_taxids[name].add(taxid)

    ### parse spreadsheet
    r = csv.reader(open(args.csv, 'rt'))
    row_headers = ['identifier'] + taxlist

    print('examining spreadsheet headers...', file=sys.stderr)
    first_row = next(iter(r))

    n_disagree = 0
    for (column, value) in zip(row_headers, first_row):
        if column.lower() != value.lower():
            print('** assuming {} == {} in spreadsheet'.format(column, value),
                  file=sys.stderr)
            n_disagree += 1
            if n_disagree > 2:
                print('whoa, too many assumptions. are the headers right?',
                      file=sys.stderr)
                sys.exit(-1)

    confusing_lineages = defaultdict(list)
    incompatible_lineages = defaultdict(list)
    assignments = {}
    for row in r:
        lineage = list(zip(row_headers, row))

        ident = lineage[0][1]
        lineage = lineage[1:]

        # clean lineage of null names
        lineage = [(a,b) for (a,b) in lineage if b not in null_names]

        # ok, find the least-common-ancestor taxid...
        taxid, rest = get_lca_taxid_for_lineage(taxfoo, names_to_taxids,
                                                lineage)

        # and find the *lowest* identifiable ancestor taxid, just to see
        # if there are confusing lineages.
        lowest_taxid, lowest_rest = \
          get_lowest_taxid_for_lineage(taxfoo, names_to_taxids, lineage)

        # do they match? if not, report.
        if lowest_taxid != taxid:
            lowest_lineage = taxfoo.get_lineage(lowest_taxid, taxlist)
            lowest_str = ', '.join(lowest_lineage)

            # find last matching, in case different classification levels.
            match_lineage = [ b for (a, b) in lineage ]
            end = match_lineage.index(lowest_lineage[-1])
            assert end >= 0
            match_lineage = match_lineage[:end + 1]
            match_str = ', '.join(match_lineage)

            confusing_lineages[(match_str, lowest_str)].append(ident)

        # check! NCBI lineage should be lineage of taxid + rest
        ncbi_lineage = taxfoo.get_lineage(taxid, taxlist)
        assert len(ncbi_lineage)
        reconstructed = ncbi_lineage + [ b for (a,b) in rest ]

        # ...make a comparable lineage from the CSV line...
        csv_lineage = [ b for (a, b) in lineage ]

        # are NCBI-rooted and CSV lineages the same?? if not, report.
        if csv_lineage != reconstructed:
            csv_str = ", ".join(csv_lineage[:len(ncbi_lineage)])
            ncbi_str = ", ".join(ncbi_lineage)
            incompatible_lineages[(csv_str, ncbi_str)].append(ident)

        # all is well if we've reached this point! We've got NCBI-rooted
        # taxonomies and now we need to record. next:
        #
        # build a list of (rank, name) pairs for the lineage; ranks whose
        # taxid could not be resolved are appended from 'rest' below.

        lineage_taxids = taxfoo.get_lineage_as_taxids(taxid)
        tuples_info = []
        for taxid in lineage_taxids:
            name = taxfoo.get_taxid_name(taxid)
            rank = taxfoo.get_taxid_rank(taxid)

            if rank in taxlist:
                tuples_info.append((rank, name))

        for (rank, name) in rest:
            assert rank in taxlist
            tuples_info.append((rank, name))

        assignments[ident] = tuples_info

    print("{} weird lineages that maybe don't match with NCBI.".format(len(confusing_lineages) + len(incompatible_lineages)), file=sys.stderr)

    ## next phase: collapse lineages etc.

    ## load revindex
    print('loading reverse index:', args.revindex, file=sys.stderr)
    custom_bins_ri = revindex_utils.HashvalRevindex(args.revindex)

    # load the signatures associated with each revindex.
    print('loading signatures for custom genomes...', file=sys.stderr)
    sigids_to_sig = {}
    for sigid, (filename, md5) in custom_bins_ri.sigid_to_siginfo.items():
        sig = revindex_utils.get_sourmash_signature(filename, md5)
        if sig.name() in assignments:
            sigids_to_sig[sigid] = sig
        else:
            debug('no assignment:', sig.name())

    # figure out what ksize we're talking about here! (this should
    # probably be stored on the revindex...)
    random_db_sig = next(iter(sigids_to_sig.values()))
    ksize = random_db_sig.minhash.ksize

    print('...found {} custom genomes that also have assignments!!'.format(len(sigids_to_sig)), file=sys.stderr)

    ## now, connect the dots: hashvals to custom classifications
    hashval_to_custom = defaultdict(list)
    for hashval, sigids in custom_bins_ri.hashval_to_sigids.items():
        for sigid in sigids:
            sig = sigids_to_sig.get(sigid, None)
            if sig:
                assignment = assignments[sig.name()]
                hashval_to_custom[hashval].append(assignment)

    # whew! done!! we can now go from a hashval to a custom assignment!!

    # for each query, gather all the matches in both custom and NCBI, then
    # classify.
    csvfp = csv.writer(sys.stdout)
    if args.output:
        print("outputting classifications to '{}'".format(args.output.name))
        csvfp = csv.writer(args.output)
    else:
        print("outputting classifications to stdout")
    csvfp.writerow(['ID'] + taxlist)

    total_count = 0
    for query_filename in args.siglist:
        for query_sig in sourmash_lib.load_signatures(query_filename,
                                                      ksize=ksize):
            print(u'\r\033[K', end=u'', file=sys.stderr)
            print('... classifying {}'.format(query_sig.name()), end='\r',
                  file=sys.stderr)
            debug('classifying', query_sig.name())
            total_count += 1

            these_assignments = defaultdict(list)
            n_custom = 0
            for hashval in query_sig.minhash.get_mins():
                # custom
                assignment = hashval_to_custom.get(hashval, [])
                if assignment:
                    these_assignments[hashval].extend(assignment)
                    n_custom += 1

                # NCBI
                for (this_taxfoo, hashval_to_lca) in lca_db_list:
                    hashval_lca = hashval_to_lca.get(hashval)
                    if hashval_lca is not None and hashval_lca != 1:
                        lineage = this_taxfoo.get_lineage_as_dict(hashval_lca,
                                                                  taxlist)

                        tuple_info = []
                        for rank in taxlist:
                            if rank not in lineage:
                                break
                            tuple_info.append((rank, lineage[rank]))
                        these_assignments[hashval_lca].append(tuple_info)

            check_counts = Counter()
            for tuple_info in these_assignments.values():
                last_tup = tuple(tuple_info[-1])
                check_counts[last_tup] += 1

            debug('n custom hashvals:', n_custom)
            debug(pprint.pformat(check_counts.most_common()))

            # now convert to trees -> do LCA & counts
            counts = Counter()
            parents = {}
            for hashval in these_assignments:

                # for each list of tuple_info [(rank, name), ...] build
                # a tree that lets us discover least-common-ancestor.
                tuple_info = these_assignments[hashval]
                tree = build_tree(tuple_info)

                # also update a tree that we can ascend from leaves -> parents
                # for all assignments for all hashvals
                parents = build_reverse_tree(tuple_info, parents)

                # now find either a leaf or the first node with multiple
                # children; that's our least-common-ancestor node.
                lca, reason = find_lca(tree)
                counts[lca] += 1

            # ok, we now have the LCAs for each hashval, and their number
            # of counts. Now sum across "significant" LCAs - those above
            # threshold.

            tree = {}
            tree_counts = defaultdict(int)

            debug(pprint.pformat(counts.most_common()))

            n = 0
            for lca, count in counts.most_common():
                if count < THRESHOLD:
                    break

                n += 1

                xx = []
                parent = lca
                while parent:
                    xx.insert(0, parent)
                    tree_counts[parent] += count
                    parent = parents.get(parent)
                debug(n, count, xx[1:])

                # update tree with this set of assignments
                build_tree([xx], tree)

            if n > 1:
                debug('XXX', n)

            # now find LCA? or whatever.
            lca, reason = find_lca(tree)
            if reason == 0:               # leaf node
                debug('END', lca)
            else:                         # internal node
                debug('MULTI', lca)

            # backtrack to full lineage via parents
            lineage = []
            parent = lca
            while parent != ('root', 'root'):
                lineage.insert(0, parent)
                parent = parents.get(parent)

            # output!
            row = [query_sig.name()]
            for taxrank, (rank, name) in itertools.zip_longest(taxlist, lineage, fillvalue=('', '')):
                if rank:
                    assert taxrank == rank
                row.append(name)

            csvfp.writerow(row)

    print(u'\r\033[K', end=u'', file=sys.stderr)
    print('classified {} signatures total'.format(total_count), file=sys.stderr)
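
build_tree, build_reverse_tree, and find_lca come from elsewhere in this project. To make the LCA step above easier to follow, here is a hedged sketch of how such helpers are commonly written over (rank, name) tuples; the real implementations may differ in detail.

def build_tree(assignments, tree=None):
    """Insert each lineage (a list of (rank, name) tuples) into a nested dict of nodes."""
    if tree is None:
        tree = {}
    for lineage in assignments:
        node = tree
        for rank_name in lineage:
            node = node.setdefault(tuple(rank_name), {})
    return tree


def build_reverse_tree(assignments, parents=None):
    """Record child -> parent links so a leaf can be walked back up to ('root', 'root')."""
    if parents is None:
        parents = {}
    for lineage in assignments:
        parent = ('root', 'root')
        for rank_name in lineage:
            parents[tuple(rank_name)] = parent
            parent = tuple(rank_name)
    return parents


def find_lca(tree):
    """Descend while there is exactly one child; a second return value of 0 marks a leaf."""
    node, children = ('root', 'root'), tree
    while len(children) == 1:
        node, children = next(iter(children.items()))
    return node, len(children)
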
Ejemplo n.º 26
0
def get_parser():
    """Parse the CLI args."""
    parser = argparse.ArgumentParser(
        description='Evaluate an ASR transcript against a reference transcript.'
    )
    parser.add_argument('ref',
                        type=argparse.FileType('r'),
                        help='Reference transcript filename')
    parser.add_argument('hyp',
                        type=argparse.FileType('r'),
                        help='ASR hypothesis filename')
    print_args = parser.add_mutually_exclusive_group()
    print_args.add_argument(
        '-i',
        '--print-instances',
        action='store_true',
        help='Print all individual sentences and their errors.')
    print_args.add_argument(
        '-r',
        '--print-errors',
        action='store_true',
        help='Print all individual sentences that contain errors.')
    parser.add_argument(
        '--head-ids',
        action='store_true',
        help=
        'Hypothesis and reference files have ids as the first token (Kaldi format).'
    )
    parser.add_argument(
        '-id',
        '--tail-ids',
        '--has-ids',
        action='store_true',
        help=
        'Hypothesis and reference files have ids as the last token (Sphinx format).'
    )
    parser.add_argument('-c',
                        '--confusions',
                        action='store_true',
                        help='Print tables of which words were confused.')
    parser.add_argument(
        '-p',
        '--print-wer-vs-length',
        action='store_true',
        help='Print table of average WER grouped by reference sentence length.'
    )
    parser.add_argument(
        '-m',
        '--min-word-count',
        type=int,
        default=1,
        metavar='count',
        help='Minimum word count to show a word in confusions (default 1).')
    parser.add_argument(
        '-a',
        '--case-insensitive',
        action='store_true',
        help='Down-case the text before running the evaluation.')
    parser.add_argument(
        '-e',
        '--remove-empty-refs',
        action='store_true',
        help='Skip over any examples where the reference is empty.')

    return parser
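
The parser above only collects options; the scoring itself happens elsewhere in the tool. For readers unfamiliar with the metric, a word error rate for a single sentence pair can be computed with a plain Levenshtein distance over words, roughly as sketched below (a standalone illustration, not this project's implementation):

def word_error_rate(ref_words, hyp_words):
    """Edit distance over words divided by the reference length."""
    d = [[0] * (len(hyp_words) + 1) for _ in range(len(ref_words) + 1)]
    for i in range(len(ref_words) + 1):
        d[i][0] = i
    for j in range(len(hyp_words) + 1):
        d[0][j] = j
    for i in range(1, len(ref_words) + 1):
        for j in range(1, len(hyp_words) + 1):
            cost = 0 if ref_words[i - 1] == hyp_words[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
    return d[-1][-1] / max(len(ref_words), 1)
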
Ejemplo n.º 27
0
def make_argparser():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command', help='Subcommands')
    read = subparsers.add_parser('read', help='Read a bookmarks export XML.')
    read.add_argument('bookmarks', help='The bookmarks file.')
    bookmark = subparsers.add_parser('bookmark',
                                     help='Save a url as a bookmark.')
    bookmark.add_argument(
        'urls',
        nargs='?',
        help=
        'Provide a literal url as the argument, or a file containing urls (one per line). '
        'If not given, this will read a list of urls from stdin.')
    bookmark.add_argument(
        '-a',
        '--auth-token',
        required=True,
        help='Your Pinboard API authentication token. Available from '
        'https://pinboard.in/settings/password')
    bookmark.add_argument(
        '-t',
        '--tags',
        default='automated',
        help=
        'The tags to save the bookmark(s) with. Use a comma-delimited list. Default: "%(default)s"'
    )
    bookmark.add_argument(
        '-d',
        '--skip-dead-links',
        action='store_true',
        help="Don't bookmark urls which return an error HTTP status.")
    bookmark.add_argument(
        '-A',
        '--user-agent',
        default=
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        help=
        'User agent to give when making requests to the urls. Default: "%(default)s"'
    )
    parser.add_argument(
        '-p',
        '--pause',
        type=float,
        default=1,
        help=
        'A time to wait in-between requests to the Pinboard API. The documentation recommends 3 '
        'seconds: https://pinboard.in/api Default: %(default)s')
    bookmark.add_argument(
        '-n',
        '--simulate',
        action='store_true',
        help=
        'Only simulate the process, printing the tabs which will be archived but without actually '
        'doing it.')
    parser.add_argument(
        '-l',
        '--log',
        type=argparse.FileType('w'),
        default=sys.stderr,
        help=
        'Print log messages to this file instead of to stderr. Warning: Will overwrite the file.'
    )
    volume = parser.add_mutually_exclusive_group()
    volume.add_argument('-q',
                        '--quiet',
                        dest='volume',
                        action='store_const',
                        const=logging.CRITICAL)
    volume.add_argument('-v',
                        '--verbose',
                        dest='volume',
                        action='store_const',
                        const=logging.INFO,
                        default=logging.INFO)
    volume.add_argument('-D',
                        '--debug',
                        dest='volume',
                        action='store_const',
                        const=logging.DEBUG)
    return parser
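
The mutually exclusive -q/-v/-D options all store a logging level into args.volume, and --log picks the destination stream, so wiring them up in the caller is a single basicConfig call. A minimal sketch under those assumptions:

import logging

def setup_logging(args):
    """Send log records to args.log at the verbosity chosen on the command line."""
    logging.basicConfig(stream=args.log, level=args.volume,
                        format='%(levelname)s: %(message)s')
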
Ejemplo n.º 28
0
#!/usr/bin/env python
import os
import sys
import pprint
import argparse
import pickle
import pylab as pl
import matplotlib.pyplot as plt
import networkx as nx

parser = argparse.ArgumentParser(description='Construct meshterm network for search criteria.')
parser.add_argument('--outfile', type=argparse.FileType('w'), required=True )
parser.add_argument('--pickle',  type=argparse.FileType('r'), required=True )

args    = parser.parse_args()
outfile = args.outfile
pickle_file  = args.pickle

if __name__ == '__main__':

    
    G = pickle.Unpickler(pickle_file).load()
    
    print "nodes: ", len(G.nodes())
      
    plt.cla()
    fig = plt.figure(figsize=(38,38), dpi=800)
    nx.draw(G, 
            node_size  = [G.degree(n) for n in G.nodes()],
            width      = [G.get_edge_data(*e)['citations'] for e in G.edges()],
            edge_color = [G.get_edge_data(*e)['jin'] for e in G.edges()] )
                #entry += "<th>" + w + "</thr>\n"
            entry = "<tr><th colspan=\"0\">" + line + "</th></tr>\n"
            entries.append(entry)

        if i % 3 == 1:
            words = line.split()
            words.append('&lt;/s&gt;')
        elif i % 3 == 2:
            probs = list(map(float, line.split()))
            entry = ""
            for w,p in zip(words, probs):
                color = '#%02x%02x%02x' % (int((1-p)*255), int((1-p)*255), int((1-p)*255))
                entry += "<td bgcolor=\"{0}\">{1}</td>".format(color, w)
            entry = "<tr>" + entry + "</tr>\n"
            entries.append(entry)


    outfile.write(html_text.format('\n'.join(entries)))


parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', type=argparse.FileType('r'),
                        default=sys.stdin, metavar='PATH',
                        help="Input file (default: standard input)")
parser.add_argument('--output', '-o', type=argparse.FileType('w'),
                        default=sys.stdout, metavar='PATH',
                        help="Output file (default: standard output)")

args = parser.parse_args()

print_probdist(args.input, args.output)
Ejemplo n.º 30
0
					buf.append('\ttest.%s();\n' % method)
			buf.append('\ttest.tear_down();\n\ttest.summary();\n')
			buf.append('}\n\n')
		buf.append('int main(string[] argv)\n{\n')
		buf.append('\tGLib.Test.init(ref argv);\n')
		buf.append('\tTest.set_nonfatal_assertions();\n')
		for path, run_func in run_funcs:
			buf.append('\tGLib.Test.add_func("%s", %s);\n' % (path, run_func))
		buf.append('\treturn Test.run();\n}\n')
		return "".join(buf)
	

if __name__ == "__main__":
	import argparse
	parser = argparse.ArgumentParser()
	parser.add_argument("-i", "--input", type=argparse.FileType('r'), help="source files to extract test cases from")
	parser.add_argument("-o", "--output", type=argparse.FileType('w'), help="where to write generated test runner")
	args = parser.parse_args()
	
	input = args.input or sys.stdin
	output = args.output or sys.stdout
	generator = TestGenerator(TestParser())
	data = input.read()
	try:
		result = generator.generate_tests(data)
		output.write(result)
		sys.exit(0)
	except pp.ParseException as e:
		sys.stderr.write("Parse Error: %s\n" % e)
		sys.exit(1)