Пример #1
0
 def cli(self):
     """Command-line entry point: parse arguments, then either print the
     parameter space(s) or run the selected experiment(s).
     """
     arg_parser = ArgumentParser()
     arg_parser.add_argument("experiment", nargs="*", default=[None], metavar="EXPERIMENT", help="experiment to run")
     arg_parser.add_argument("--repl", action="store_true", default=False, help="start an interactive command line")
     arg_parser.add_argument("--print-parameter-space", action="store_true", default=False, help="print size of parameter space")
     # Expose every known parameter as its own --long-option.
     for key in sorted(self.default_parameter_space.parameters):
         arg_parser.add_argument("--" + key.replace("_", "-"))
     args = arg_parser.parse_args()
     if any(experiment is not None and experiment not in self.experiment_parameter_spaces.keys() for experiment in args.experiment):
         arg_parser.error("EXPERIMENT must be one of:\n{}".format("\n".join("\t{}".format(experiment) for experiment in self.experiment_parameter_spaces.keys())))
     # NOTE(review): "repl" stays in this dict and is forwarded to
     # fix_parameters()/run_with() -- confirm that is intentional.
     arguments = dict((k, intellicast(v)) for k, v in args.__dict__.items() if k not in ("experiment", "print_parameter_space") and v is not None)
     if args.print_parameter_space:
         for experiment in args.experiment:
             # BUG FIX: the original had a second `for experiment in
             # args.experiment` loop nested here, which ignored the current
             # item and re-printed every experiment's space once per
             # non-None entry.  Handle only the current experiment.
             if experiment is None:
                 pspace = self.default_parameter_space.clone()
             else:
                 pspace = self.experiment_parameter_spaces[experiment].clone()
             pspace.fix_parameters(**arguments)
             print("\n".join(" ".join("{}={}".format(k, v) for k, v in sorted(space.items()) if k in chain(pspace.variable_parameters, pspace.dependent_parameters)) for space in pspace.permutations()))
     else:
         for experiment in args.experiment:
             if experiment is None:
                 self.experiment.set_parameter_space(self.default_parameter_space)
             else:
                 self.experiment.set_parameter_space(self.experiment_parameter_spaces[experiment])
             self.experiment.run_with(**arguments)
Пример #2
0
def run():
    defaultmech = "%s/mapping/cb05cl_ae6_aq.csv" % os.path.dirname(__file__)
    parser = ArgumentParser(description = "Usage: %prog [-tq] \n"+(" "*16)+" [-i <init name>] [-f <final name>] <yamlfile>")
    parser.add_argument("-t", "--template", dest = "template", action = "store_true", default = False, help="Output template on standard out (configurable with -m and -c")

    parser.add_argument("-v", "--verbose", dest = "verbose", action = "count", default = 0, help = "extra output for debugging")
    
    paths = glob(os.path.join(os.path.dirname(__file__), 'mapping', '*_*.csv'))
    mechanisms = ', '.join(['_'.join(path.split('/')[-1].split('_')[:])[:-4] for path in paths])
    parser.add_argument("-c", "--configuration", dest="configuration", default = None,
                        help = "Chemical mechanisms: %s (for use with -t)" % mechanisms)
    parser.add_argument('configfile')
    options = parser.parse_args()
    args = [options.configfile]
    if options.template:
        from template import template
        if options.configuration is None:
            warn("Using default mechanism: %s" % defaultmech)
            options.configuration = defaultmech
        else:
            if os.path.exists(options.configuration):
                pass
            else:
                options.configuration = "%s/mapping/%s.csv" % (os.path.dirname(__file__), options.configuration)
                if not os.path.exists(options.configuration):
                    raise IOError('Cannot find file %s; must be either you own file or in %s' % (options.configuration, mechanisms))
        print template(options.configuration)
        parser.exit()
    if len(args)<1:
        parser.error(msg="Requires a yaml file as an argument.  For a template use the -t option.  The template will be output to the stdout.")
    else:
        yamlpath=args[0]
        from load import loader
        from process import process
        outf = process(config = loader(yamlpath), verbose = options.verbose)
Пример #3
0
def parse_config():
    """Build and validate the command-line configuration for the diff tool.

    Both directories must exist and live at the same relative path inside
    a lucene/solr checkout; any violation is reported via parser.error().
    """
    parser = ArgumentParser(
        description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        '--skip-whitespace',
        action='store_true',
        default=False,
        help='Ignore whitespace differences')
    parser.add_argument('from_dir', help='Source directory to diff from')
    parser.add_argument('to_dir', help='Source directory to diff to')
    config = parser.parse_args()

    # Validate the 'from' side: real directory inside a checkout.
    if not os.path.isdir(config.from_dir):
        parser.error("'from' path %s is not a valid directory" % config.from_dir)
    config.from_root, from_relative = find_root(config.from_dir)
    if config.from_root is None:
        parser.error("'from' path %s is not relative to a lucene/solr checkout"
                     % config.from_dir)

    # Validate the 'to' side the same way.
    if not os.path.isdir(config.to_dir):
        parser.error("'to' path %s is not a valid directory" % config.to_dir)
    config.to_root, to_relative = find_root(config.to_dir)
    if config.to_root is None:
        parser.error("'to' path %s is not relative to a lucene/solr checkout"
                     % config.to_dir)

    # The two paths must point at the same location within their checkouts.
    if from_relative != to_relative:
        parser.error(
            "'from' and 'to' path are not equivalent relative paths within their"
            ' checkouts: %r != %r' % (from_relative, to_relative))
    return config
Пример #4
0
def main():
    '''Called if script is run from the command line.'''

    from argparse import ArgumentParser
    import sys

    parser = ArgumentParser(description=__doc__)
    parser.add_argument("-f", "--file", help="Input file (burp session file)")
    parser.add_argument("-o", "--output", help="Output file (clean XML file)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Be more verbose")
    parser.add_argument("-n", "--non-printable", action="store_true", help="Retain non-printable characters")
    
    args = parser.parse_args()

    if args.file:
            if args.output == "-":
                out = sys.stdout
            else:
                out = open(args.output, 'wb') if args.output else open(args.file + '.xml', 'wb')
    else:
        print __doc__
        parser.error('Input file is a mandatory parameter!')

    burp_to_xml(args.file, out, args.non_printable, args.verbose)
    out.close()
    
    if args.verbose:
                sys.stderr.write("Output written to %s\n" % outfile)
def parse_opts():
    """Parse command-line options for the tongue-twister generator.

    Returns the parsed namespace; a negative --training-size is normalized
    to None (meaning "use the whole corpus").
    """
    unilex_path = os.path.join(os.path.dirname(__file__), 'unilex')
    ap = ArgumentParser(description=__doc__,
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('-c', '--corpus', help='Text to train on')
    ap.add_argument('-m', '--model', help='Pre-trained model')
    ap.add_argument('-p', '--pronounce', default=unilex_path,
                    help='Spelling/pronunciation dictionary')
    ap.add_argument('-t', '--training-size', type=int, default=10000,
                    help=('Number of words from the corpus to train on,'
                          ' -1 -> all words'))
    ap.add_argument('-n', '--num-twisters', type=int, default=5,
                    help='Number of tongue twisters to output')
    ap.add_argument('-w', '--words-per-twister', type=int, default=2,
                    help='Number of words per generated twister')
    ap.add_argument('-r', '--reverse-twist', action='store_true',
                    help='Generate especially non-twisty phrases')
    ap.add_argument('-R', '--random', action='store_true',
                    help='Generate phrases at random (for testing)')
    ap.add_argument('-s', '--score', action='store_true',
                    help='Use the model to score sentences from stdin')
    opts = ap.parse_args()
    if opts.training_size < 0:  # use the whole corpus
        opts.training_size = None
    # BUG FIX: the original tested `< 0`, silently accepting 0 although the
    # error message (and the option's purpose) require a value > 0.
    if opts.num_twisters < 1:
        ap.error('Must provide a number of twisters > 0')
    return opts
Пример #6
0
Файл: sjmpc.py Проект: dnnr/wall
    def run(self):
        """Parse CLI arguments, send one message over a websocket and print
        the matching reply's data as indented JSON.
        """
        parser = ArgumentParser()
        parser.add_argument('url')
        parser.add_argument('type')
        parser.add_argument('arg', type=namedarg, nargs='*')
        args = parser.parse_args()

        # Establish the connection; a malformed URL becomes an argument
        # error, transport failures abort with exit status 1.
        try:
            connection = websocket.create_connection(args.url)
        except ValueError:
            parser.error(
                "argument url: invalid url value: '{}'".format(args.url))
        except (IOError, WebSocketException) as exc:
            print('error: failed to connect (details: {})'.format(exc),
                file=sys.stderr)
            sys.exit(1)

        try:
            call_msg = Message(args.type, dict(args.arg))
            connection.send(str(call_msg))
            # Skip unrelated traffic until the reply to our call arrives.
            result_msg = Message.parse(connection.recv())
            while result_msg.type != call_msg.type:
                result_msg = Message.parse(connection.recv())
            connection.close()
        except WebSocketException as exc:
            print('error: disconnected (details: {})'.format(exc),
                file=sys.stderr)
            sys.exit(1)

        print(json.dumps(result_msg.data, indent=4))
Пример #7
0
def main():
  """Build a PDF from previously generated HTML files.

  Command line: SOURCE_DIR PDF_FILE [--params JSON]
  """
  import json
  from argparse import ArgumentParser

  parser = ArgumentParser()
  addarg = parser.add_argument
  addarg("src_dir", metavar="SOURCE_DIR", nargs=1,
    help="source dir of the generated HTML files")
  addarg("pdf_file", metavar="PDF_FILE", nargs=1,
    help="destination path of the PDF file")
  addarg("--params", "-p", dest="params", metavar="JSON",
    help="pass parameters to the generator as a JSON string")

  # BUG FIX: the original read sys.uargv, which does not exist (NameError).
  opts = args = parser.parse_args(sys.argv[1:])

  params = {}
  if opts.params:
    try:
      params = json.loads(opts.params)
    except ValueError as e:
      parser.error("--params=JSON: %s" % e)
  else:
    print("Warning: no --params argument defined; using defaults.")

  # BUG FIX: nargs=1 stores a one-element list; unpack the single values
  # before handing them to Path/doc_path.
  SRC = Path(args.src_dir[0])
  DIL = dil_path()
  DEST_PDF = doc_path(args.pdf_file[0])
  TMP = (DEST_PDF.folder/"pdf_tmp").mkdir()

  pdf_gen = PDFGenerator()
  pdf_gen.fetch_files(DIL, TMP)
  html_files = SRC.glob("*.html")
  pdf_gen.run(html_files, DEST_PDF, TMP, params)

  TMP.rm()
Пример #8
0
def main():
    """Run epigrass models from the console (no GUI).

    Validates backend options, then either uploads the model or runs the
    simulation.
    """
    # Options and Argument parsing for running model from the command line, without the GUI.
    usage = "usage: PROG [options] your_model.epg"
    #    parser = OptionParser(usage=usage, version="%prog "+__version__.version)
    parser = ArgumentParser(usage=usage, description="Run epigrass models from the console",
                            prog="epirunner " + __version__.version)
    parser.add_argument("-b", "--backend", dest="backend",
                        help="Define which datastorage backend to use", metavar="<mysql|sqlite|csv>", default="sqlite")
    parser.add_argument("-u", "--dbusername",
                        dest="dbuser", help="MySQL user name")
    parser.add_argument("-p", "--password",
                        dest="dbpass", help="MySQL password for user")
    parser.add_argument("-H", "--dbhost",
                        dest="dbhost", default="localhost", help="MySQL hostname or IP address")
    parser.add_argument("--upload", help="Upload your models and latest simulation to Epigrass Web")
    parser.add_argument("-P", "--parallel", action="store_true", default=False,
                        dest="parallel", help="use multiprocessing to run the simulation")
    parser.add_argument("epg", metavar='EPG', nargs=1,
                        help='Epigrass model definition file (.epg).')

    args = parser.parse_args()
    if args.backend == "mysql" and not (args.dbuser and args.dbpass):
        parser.error("You must specify a user and password when using MySQL.")
    if args.backend not in ['mysql', 'sqlite', 'csv']:
        parser.error('"%s" is an invalid backend type.' % args.backend)
    # BUG FIX: the original was a bare `print` statement followed by a no-op
    # tuple expression on the next line (a mangled Python 2 print); restore
    # the intended single print call.
    print('==> ', args.epg)

    if args.upload:  # Only upload model
        upload_model(args)
    else:
        onStraightRun(args)
Пример #9
0
def process_command_line():
    """Parse and validate command-line arguments for the MDP3 pcap parser.

    Verifies that the schema and security-definition files exist before
    returning the parsed namespace.
    """
    from argparse import ArgumentParser

    # BUG FIX: ArgumentParser(version=...) was removed in Python 3 and
    # raises TypeError; an explicit version action is the supported form
    # (and matches the -v/--version flags Python 2 used to add).
    parser = ArgumentParser(
        description='Parse a pcap file containing CME MDP3 market data based on a SBE xml schema file.')
    parser.add_argument('-v', '--version', action='version', version='0.1')

    parser.add_argument('pcapfile',
        help='Name of the pcap file to process')

    parser.add_argument('-s', '--schema', default='templates_FixBinary.xml',
        help='Name of the SBE schema xml file')

    parser.add_argument('-d', '--secdef', default='secdef.dat.gz',
        help='Name of the security definition file')

    parser.add_argument('-i', '--ids', default='',
        help='Comma separated list of security ids to display books for')

    args = parser.parse_args()

    # check number of arguments, verify values, etc.:
    if not os.path.isfile(args.schema):
        parser.error('sbe schema xml file "{}" not found'.format(args.schema))

    if not os.path.isfile(args.secdef):
        parser.error('Security definition file "{}" not found'.format(args.secdef))


    return args
Пример #10
0
def main():
    """Generate thumbnails for scraped PDF documents.

    Walks --pdf-root, optionally filters by the given filename/directory
    patterns, and checks/creates a thumbnail for every matching PDF.
    """
    pdf_root_default = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'pdf')
    thumbnail_root_default = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'static', 'thumbnails')

    p = ArgumentParser(description='Import scraped documents into the database')
    p.add_argument('--pdf-root',
                   help='Root directory of PDF files to make thumbnails for (default: ../pdf)',
                   default=pdf_root_default)
    p.add_argument('--thumbnail-root',
                   help='Root directory to put thumbnails in (default: ../static/thumbnails',
                   default=thumbnail_root_default)
    p.add_argument('--force-regen', '-f',
                   help='Regenerate all thumbnails, even those that already exist.',
                   action='store_true')
    p.add_argument('--no-optipng', '-n',
                   help='Do not use optipng on the output (much faster, larger pngs)',
                   action='store_true')
    p.add_argument('--verbose', '-v', help='be more verbose',
                   action='store_true')
    p.add_argument('--quiet', '-q', help='be more quiet', action='store_true')
    p.add_argument('pattern', nargs='*', help='filename pattern')

    args = p.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    elif args.quiet:
        logging.basicConfig(level=logging.WARN)

    if not os.path.isdir(args.pdf_root):
        p.error('pdf root is not a directory.')
    if not os.path.isdir(args.thumbnail_root):
        p.error('thumbnail root is not a directory.')

    logging.info("Generating thumbnails for pdfs...")
    checked = 0

    pdf_root = os.path.abspath(args.pdf_root)
    thumbnail_root = os.path.abspath(args.thumbnail_root)

    for root, dirs, files in os.walk(pdf_root):
        # BUG FIX: in Python 3 filter() returns a lazy iterator, so the
        # `if not pdf_files` test below was always false and the first
        # pattern's update() exhausted the iterator for later patterns;
        # build a real list instead.  The loop variable was also renamed
        # from `p`, which shadowed the parser above.
        pdf_files = [x for x in files if x.endswith('.pdf')]
        if args.pattern:
            matches = set()
            for pat in args.pattern:
                if pat in root:
                    # directory match.. put them all in
                    matches = pdf_files
                    break
                matches.update(x for x in pdf_files if pat in x)
            pdf_files = list(matches)
        if not pdf_files:
            continue
        for f in pdf_files:
            check_pdf(root, f, pdf_root, thumbnail_root, args.force_regen, not args.no_optipng)
            checked += 1
            if (checked % 1000) == 0:
                logging.info(".. %d pdfs checked." % checked)

    logging.info("FINISHED. %d pdfs checked." % checked)
Пример #11
0
    def run(self):
        """Parse CLI arguments, connect to the server, optionally
        authenticate with --auth-token, perform one call and print the
        result as indented JSON.
        """
        parser = ArgumentParser()
        parser.add_argument('url')
        parser.add_argument('type')
        parser.add_argument('arg', type=namedarg, nargs='*')
        parser.add_argument('--auth-token')
        args = parser.parse_args()

        # Connect: a malformed URL is reported as an argument error;
        # transport failures abort with exit status 1.
        try:
            self._connection = websocket.create_connection(args.url)
        except ValueError as e:
            parser.error(
                "argument url: invalid url value: '{}'".format(args.url))
        except (IOError, WebSocketException) as e:
            print('error: failed to connect (details: {})'.format(e),
                file=sys.stderr)
            sys.exit(1)

        try:
            # Authenticate first when a token was supplied; a falsy reply
            # from the server means the token was rejected.
            if args.auth_token:
                if not self._call('authenticate', {'token': args.auth_token}):
                    print('error: failed to authenticate')
                    sys.exit(1)
            result = self._call(args.type, dict(args.arg))
            self._connection.close()
        except WebSocketException as e:
            print('error: disconnected (details: {})'.format(e),
                file=sys.stderr)
            sys.exit(1)

        print(json.dumps(result, indent=4))
Пример #12
0
def main(args=None):
    """BHIndex command-line entry point (Python 2).

    Builds the top-level parser plus one sub-parser per sub-command, then
    dispatches to the handler each sub-command registered as `args.main`.
    """
    # Default to the process arguments when none are passed explicitly.
    if not args:
        from sys import argv
        args = argv[1:]

    from argparse import ArgumentError, ArgumentParser
    from distdb import DB

    cfg = config.read()

    CLI = ArgumentParser(description='BHIndex - Distributed Filesystem using BitHorde')
    CLI.add_argument('--database', '--db', dest="db", default=cfg.get('DB', 'file'),
                     help="Path to the SQLite database")
    CLI.add_argument('--setuid', dest="suid", help="Set username before running")
    CLI.add_argument('--verbose', '-v', action="store_true",
                     help="write debug-level output")
    subparsers = CLI.add_subparsers(title="Sub-commands")

    # Each sub-command module contributes its own arguments (and is
    # expected to set the `main` attribute used for dispatch below).
    Add = subparsers.add_parser('add', help='Add files to BitHorde and BHIndex')
    add.prepare_args(Add, cfg)

    Cat = subparsers.add_parser('cat', help='Read one or more files from BitHorde')
    cat.prepare_args(Cat, cfg)

    ExportLinks = subparsers.add_parser('link', help='Exports the bhindex-files to a folder of symlinks')
    links.prepare_args(ExportLinks, cfg)

    LS = subparsers.add_parser('ls', help='List files in a directory of BHIndex')
    tree.prepare_ls_args(LS, cfg)

    MV = subparsers.add_parser('mv', help='Move a file or directory in the bithorde tree')
    tree.prepare_mv_args(MV, cfg)

    Scanner = subparsers.add_parser('update', help='Scans for asset-availability in bithorde and updates DB')
    scanner.prepare_args(Scanner, cfg)

    Syncer = subparsers.add_parser('syncer', help='Runs online synchronization with other bhindex')
    syncer.prepare_args(Syncer, cfg)

    Vacuum = subparsers.add_parser('vacuum', help='Runs routine DB-maintenance')
    vacuum.prepare_args(Vacuum, cfg)

    args = CLI.parse_args(args)
    try:
        if args.verbose:
            lvl = logging.DEBUG
        else:
            lvl = logging.INFO
        logging.basicConfig(level=lvl, format="%(levelname)-8s %(asctime)-15s <%(name)s> %(message)s")

        db = DB(args.db)

        # Drop privileges before running the sub-command, if requested.
        if args.suid:
            from pwd import getpwnam
            from os import setuid
            setuid(getpwnam(args.suid).pw_uid)

        # Dispatch to the handler the selected sub-parser registered.
        args.main(args, cfg, db)
    except ArgumentError, e:
        CLI.error(e)
Пример #13
0
def parse_options():
    """
    Parse command line arguments and options.

    Returns the options as a plain dict with keys "input" and "output";
    the output name is forced to carry an ".xml" extension.
    """

    epilog = "All arguments and options are mandatory"

    # The RawDescriptionHelpFormatter is required to show the epilog
    # NOTE(review): the `version=` keyword only works on Python 2's
    # argparse; it was removed in Python 3 (use a version action instead).
    parser = ArgumentParser(epilog=epilog, formatter_class=RawDescriptionHelpFormatter, version="%prog 1.0")

    parser.add_argument("-i", "--input", metavar='"INPUT"', default="", help="Name of the ADL file to be processed")
    parser.add_argument("-o", "--output", default="", metavar='"OUTPUT"', help="Name of the XML output file")

    args = parser.parse_args()

    # Convert the list of options to a dictionary
    options = args.__dict__

    # The input file is mandatory despite being an optional-style flag.
    if options["input"] == "":
        parser.error("Error: Input file is missing.")

    # If not specified, the default output name is the input name
    if options["output"] == "":
        # optionss["output"] = options["input"].replace(".adl", "")
        options["output"] = os.path.splitext(options["input"])[0]
        options["output"] += ".xml"
        print "Using default output filename '" + options["output"] + "'"
    else:
        # Force an .xml extension on any explicitly given output name.
        if os.path.splitext(options["output"])[1] != ".xml":
            options["output"] = os.path.splitext(options["output"])[0]
            options["output"] += ".xml"
            print "Warning: extension changed automatically to '.xml', output filename is '" + options["output"] + "'"

    return options
def main(argv=None): # IGNORE:C0111
    '''Command line options.

    Resolves the destination to an IPv4 address (via DNS when needed) and
    hands it to go_do_it().  Returns 0 on success/interrupt, 2 on error.
    '''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    # Matches dotted-quad IPv4 literals (no range validation per octet).
    ipv4 = re.compile('^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$')
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # Second line of the module docstring serves as the short description.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

  Created by user_name on %s.
  Copyright 2015 organization_name. All rights reserved.

  Licensed under the Apache License 2.0
  http://www.apache.org/licenses/LICENSE-2.0

  Distributed on an "AS IS" basis without warranties
  or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("destination", help="The destination to ping")
        parser.add_argument('-V', '--version', action='version', version=program_version_message)

        # Process arguments
        args = parser.parse_args()
        destination = args.destination
        # Resolve hostnames to an IPv4 address; report DNS failures as
        # argument errors.
        if not ipv4.match(destination):
            try:
                destination = socket.gethostbyname(destination)
            except socket.gaierror as e:
                parser.error("\nUnknown host: %s (%s)\n" % (destination, e.args[1]))
        if DEBUG: print args



        # Now do it!
        go_do_it(destination)

        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception, e:
        # In debug mode re-raise for a traceback; otherwise report briefly.
        if DEBUG:
            raise(e)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2
Пример #15
0
def main():
    """Chart-downloader entry point (Python 2).

    Reads an optional --config file and merges its validated [chartdl]
    section into the option defaults.
    """
    from ConfigParser import SafeConfigParser
    from argparse import ArgumentParser

    description = (
        "Retrieves the german charts from mtv "
        "downloads the corresponding videos from youtube "
        "and if wanted, extracts the audio with "
        "mplayer and lame (flv to mp3)."
    )
    parser = ArgumentParser(description=description)
    parser.add_argument("--config", dest="config", metavar="FILE", help="specify config file")

    defaults = dict()
    # Skip config pre-parsing when the user only asked for help, so the
    # help output is not affected by config errors.
    if not "-h" in sys.argv and not "--help" in sys.argv:
        arg, remaining_args = parser.parse_known_args()

        if not arg.config is None:
            config = SafeConfigParser()
            config.read(arg.config)
            try:
                # Validate the [chartdl] section; listed keys are booleans.
                items = validate_config(config.items("chartdl"), booleans=["audio_only", "notify", "debug", "quiet"])
            except ValidationError, e:
                parser.error(unicode(e))
            defaults.update(items)
Пример #16
0
def main():
    """CLI: fetch the German MTV charts and print them as a table or JSON."""
    from argparse import ArgumentParser
    import json

    parser = ArgumentParser(description='fetches the german music'
                                        ' charts from mtv')
    parser.add_argument('-c', '--category', dest='category',
                        choices=CHARTS.keys(), default='hitlist',
                        help='chart category')
    parser.add_argument('-o', '--output', dest='output',
                        choices=['table', 'json'], default='table',
                        help='output format')
    parser.add_argument('-n', '--number', dest='number', type=int,
                        default=100,
                        help='number of shown charts')
    parser.add_argument('-r', '--reversed', dest='reversed',
                        action='store_true',
                        help='reverse list')

    args = parser.parse_args()

    # The table renderer needs the optional texttable package.
    if args.output == 'table' and texttable is None:
        parser.error('Unable to output table, install `texttable`')

    all_charts = get_charts(args.category)
    if args.reversed:
        all_charts = reversed(list(all_charts))
    charts = list(all_charts)[:args.number]

    # choices= restricts output to exactly these two formats.
    formatter = make_table if args.output == 'table' else json.dumps
    print(formatter(charts).encode('utf-8'))
def parse_args():
    """Parses arguments, returns (options, args).

    When no serial port is given and exactly one is available, it is
    selected automatically; otherwise a port must be specified.
    """
    from argparse import ArgumentParser

    available_ports = list(get_serial_ports())
    parser = ArgumentParser(description='Upload AVR `.hex` to '
                            'Arduino-compatible board.')
    parser.add_argument('board_name', help='Arduino board name (e.g., `uno`, '
                        '`mega2560`).')
    parser.add_argument('-V', '--disable-verify', action='store_true',
                        help='Disable automatic verify check when uploading '
                        'data.')
    parser.add_argument('hex', type=path, help='Path to `.hex` file.')
    # BUG FIX: the original used nargs='+', which makes the positional
    # required -- the `args.port is None` auto-selection branch below was
    # dead code.  nargs='?' makes the port optional as intended.
    parser.add_argument('port', help='Serial port.', nargs='?',
                        default=None, choices=available_ports)
    args = parser.parse_args()
    if args.port is None:
        # No serial port was specified.
        if len(available_ports) == 1:
            # There is only one serial port available, so select it
            # automatically.
            args.port = available_ports[0]
        else:
            parser.error('No serial port was specified.  Please select at '
                         'least one of the following ports: %s' %
                         available_ports)

    return args
Пример #18
0
def parse_options():
    """Parse database connection options.

    Returns (options, args) where `args` holds the positional leftovers
    from parse_known_args(); exactly one (the SQL config file) is required.
    """
    usage = "%(prog)s [options] config_file"
    parser = ArgumentParser(usage=usage)
    parser.add_argument("-l", "--limit",
                        dest="limit",
                        # BUG FIX: default was int 10 but user-supplied
                        # values arrived as str; coerce consistently.
                        type=int,
                        default=10,
                        help="The number of rows to return")
    parser.add_argument("--host",
                        dest="host",
                        default="localhost",
                        help="The mysql hostname")
    parser.add_argument("-u", "--username",
                        dest="username",
                        default="candlepin",
                        help="The username for the database")
    parser.add_argument("-p", "--password",
                        dest="password",
                        default="",
                        help="The password for the database user")
    parser.add_argument("-n", "--database-name",
                        dest="database",
                        default="candlepin",
                        help="The database name")
    parser.add_argument("-t", "--database-type",
                        dest="type",
                        choices=["mysql","postgres"],
                        default="mysql",
                        help="The database type")

    # parse_known_args collects the unrecognized positionals as `args`.
    (options, args) = parser.parse_known_args()
    if len(args) != 1:
        parser.error("You must provide a config file name with sql")
    return (options, args)
Пример #19
0
def parse_args(argv=None):
    """Parse command-line arguments for the bundle deployment test.

    Rejects the unsupported combination of --allow-native-deploy with an
    explicit --bundle-name.
    """
    parser = ArgumentParser()
    parser.add_argument('--upgrade', action="store_true", default=False,
                        help='Perform an upgrade test.')
    parser.add_argument('bundle_path',
                        help='URL or path to a bundle')
    add_basic_testing_arguments(parser)
    # Remaining options, registered in the same order as before so the
    # generated help output is unchanged.
    extra_options = (
        (('--allow-native-deploy',),
         dict(action='store_true',
              help='Let juju 2 use native bundle deploying.')),
        (('--bundle-verification-script',),
         dict(help='Script to verify the bundle.')),
        (('--bundle-name',),
         dict(default=None, help='Name of the bundle to deploy.')),
        (('--health-cmd',),
         dict(default=None,
              help='A binary for checking the health of the'
                   ' deployed bundle.')),
        (('--upgrade-condition',),
         dict(action='append', default=None,
              help='unit_name:<conditions>'
                   ' One or more of the following conditions to apply'
                   ' to the given unit_name: clock_skew.')),
        (('--agent-timeout',),
         dict(type=int, default=1200,
              help='The time to wait for agents to start')),
        (('--workload-timeout',),
         dict(type=int, default=1800,
              help='The time to wait for workloads to active')),
    )
    for flags, kwargs in extra_options:
        parser.add_argument(*flags, **kwargs)
    args = parser.parse_args(argv)
    if args.allow_native_deploy and args.bundle_name:
        parser.error('cannot supply bundle name with native juju deploying')
    return args
Пример #20
0
def options():
    """Print help menu and handle the options.

    Returns the parsed options as a dict; errors out when no input
    file/directory was given.
    """

    # TODO the output file can't have .html as extension.
    # The RawDescriptionHelpFormatter is required to show the epilog
    # BUG FIX: ArgumentParser(version=...) is invalid on Python 3; an
    # explicit version action provides the same behavior.
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version", version="%(prog)s 0.9")

    parser.add_argument("-o", "--output",
        metavar = "FILENAME",
        help = "Name of the html output file")

    parser.add_argument("-i", "--input",
        metavar = "FILENAME or DIR_NAME",
        # BUG FIX: the default was " " (a single space), so the
        # missing-input check below could never trigger.
        default = "",
        help = "Name of the file/directory that should be processed")

    parser.add_argument("-r", "--recursive",
        action="store_true",
        help = "If the input is a directory scan the directories recursively")

    parser.add_argument("-t", "--template",
        metavar = "FILENAME",
        help = "Name of the template you want to use")

    args = parser.parse_args()

    # Convert the namespace to a plain dictionary.
    opts = args.__dict__

    # BUG FIX: `is ""` compared object identity, not equality; use ==.
    if opts['input'] == "":
        parser.error("Input file is missing.")

    return opts
Пример #21
0
def main():
    """Entry point of the command-line password manager.

    Exactly one action flag must be given; the first set flag (in the
    order below) determines which PasswordManager method runs.
    """
    desc = u'{0} [Option] \nDetailed options -h or --help'.format(__file__)
    parser = ArgumentParser(description=desc)
    parser.add_argument('-c', '-create', action='store_true',
            help=u'Create new password.')
    parser.add_argument('-l', '-load', action='store_true',
            help=u'Load saved password by it\'s keyword.')
    parser.add_argument('-u', '-update', action='store_true',
            help=u'Update master password for this tool.')
    parser.add_argument('-d', '-delete', action='store_true',
            help=u'Delete a password by it\'s keyword.')
    parser.add_argument('-list', action='store_true',
            help=u'Show all passwords you saved')

    args = parser.parse_args()

    flags = (args.c, args.l, args.u, args.d, args.list)
    if not any(flags):
        parser.error('Please input a option')

    passwordManager = PasswordManager()

    # The first set flag wins -- same precedence as an if/elif chain.
    handlers = (passwordManager.create_passwd,
                passwordManager.load_passwd,
                passwordManager.update_master,
                passwordManager.delete_passwd,
                passwordManager.show_passwds)
    for flag, handler in zip(flags, handlers):
        if flag:
            handler()
            break
Пример #22
0
def parse_remote_options(argv=None):
    """
    Parse command line arguments and options.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``
                 (added for testability, backward compatible)
    :return: parsed argparse namespace with ``ip``, ``port``, ``delay``
             and ``keys`` attributes
    """
    description = '''Send commands to Samsung D-Series (and up) devices.
Commands can either start with 'KEY_' and be a valid key code or any
text (eG usable for password fields, etc.).'''
    p = ArgumentParser(description=description)
    p.add_argument('-i', '--ip', dest='ip', help='Device IP (mandatory if you '
                                                 'don\'t set SAMSUNG_DEVICE in your environment)')
    # Fix: argparse requires `type` to be a callable (the strings 'int' /
    # 'float' are an optparse idiom and make argparse blow up at parse
    # time). Use the real builtins and matching non-string defaults.
    p.add_argument('-p', '--port', dest='port', action='store', type=int,
                   default=55000, help='Device Port (default %(default)s')
    p.add_argument('-d', '--delay', dest='delay', action='store', type=float,
                   default=0.5, help='Delay between commands in seconds ('
                                     'default: %(default)s')
    p.add_argument('keys', nargs='+')
    args = p.parse_args(argv)
    if args.ip is None:
        # Fall back to the environment when no explicit IP was given.
        try:
            args.ip = os.environ['SAMSUNG_DEVICE']
        except KeyError:
            p.error('Either set SAMSUNG_DEVICE in the environment or specify '
                    'the device IP\n')
    # nargs='+' already guarantees at least one key; keep as a defensive
    # guard in case the nargs spec ever changes.
    if not args.keys:
        p.error('Please specify one or more commands to send\n')
    return args
Пример #23
0
def main():
    """Parse CLI options and run twod, either attached or daemonized."""
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', metavar='FILE',
                        help="load configuration from FILE")
    parser.add_argument('-p', '--pidfile', metavar='FILE',
                        help="use FILE as pidfile")
    parser.add_argument('-D', '--no-detach', dest='nodetach',
                        action='store_true',
                        help="do not detach from console")
    parser.add_argument('-V', '--version', action='version',
                        version='twod ' + __version__)
    args = parser.parse_args()

    # A config path, when given, must point at an actual file.
    if args.config and not path.isfile(path.expanduser(args.config)):
        parser.error("'%s' is not a file" % args.config)

    twod = Twod(args.config) if args.config else Twod()

    # Foreground mode: run attached to the console and return.
    if args.nodetach:
        twod.run()
        return

    pidfile = args.pidfile or '/var/run/twod.pid'
    twod.log.debug("Moving to background...")

    # Try to catch issues with pidfile before detaching, but as late as
    # possible to avoid race conditions
    if not access(path.dirname(pidfile), W_OK | X_OK):
        twod.log.critical("Unable to write pidfile")
        exit(1)
    with DaemonContext(pidfile=PIDLockFile(pidfile)):
        twod.run()
Пример #24
0
def main():
    """CLI entry point: parse scanner options, normalize paths, and report."""
    parser = ArgumentParser()
    parser.add_argument("-t",           dest="target_hosts", required=True, help="Set a target range of addresses to target. Ex 10.11.1.1-255" )
    parser.add_argument("-o",           dest="output_directory", required=True, help="Set the output directory. Ex /root/Documents/labs/")
    parser.add_argument("-w",           dest="wordlist", required=False, help="Set the wordlist to use for generated commands. Ex /usr/share/wordlist.txt")
    parser.add_argument("--pingsweep",  dest="ping_sweep", action="store_true", help="Write a new target.txt by performing a ping sweep and discovering live hosts.", default=False)
    parser.add_argument("--dns",        dest="find_dns_servers", action="store_true", help="Find DNS servers from a list of targets.", default=False)
    parser.add_argument("--services",   dest="perform_service_scan", action="store_true", help="Perform service scan over targets.", default=False)
    parser.add_argument("--hostnames",  dest="hostname_scan", action="store_true", help="Attempt to discover target hostnames and write to 0-name.txt and hostnames.txt.", default=False)
    parser.add_argument("--snmp",       dest="perform_snmp_walk", action="store_true", help="Perform service scan over targets.", default=False)
    parser.add_argument("--quick",      dest="quick",   action="store_true", required=False, help="Move to the next target after performing a quick scan and writing first-round recommendations.", default=False)
    parser.add_argument("--quiet",      dest="quiet",   action="store_true", help="Supress banner and headers to limit to comma dilimeted results only.", default=False)
    parser.add_argument("--exec",       dest="follow",  action="store_true", help="Execute shell comamnds from recommendations as they are discovered. Likely to lead to very long execute times depending on the wordlist being used.", default=False)
    parser.add_argument("--simple_exec",dest="quickfollow",  action="store_true", help="Execute non-brute forcing shell comamnds only as they are discovered.", default=False)
    arguments = parser.parse_args()

    # NOTE(review): with -t/-o required, parse_args() above already exits
    # on an empty command line, so this branch is effectively dead; kept
    # for parity with the original flow.
    if len(sys.argv) == 1:
        print_banner()
        parser.error("No arguments given.")
        parser.print_usage()  # fixed: was a bare attribute access (no-op)
        sys.exit()

    # Strip a single trailing path separator. The original tested
    # endswith('/' or '\\'), which short-circuits to '/' and never
    # matched a trailing backslash; a tuple checks both.
    if arguments.output_directory.endswith(('/', '\\')):
        arguments.output_directory = arguments.output_directory[:-1]
    if arguments.target_hosts.endswith(('/', '\\')):
        arguments.target_hosts = arguments.target_hosts[:-1]

    print(is_json(CONFIG))
Пример #25
0
    def check_args(self, argv):
        """Parse CLI options for a SAM run, validate the model/corpus paths,
        and register every file the run will produce."""
        parser = ArgumentParser()
        parser.add_argument('--model', type=str, required=True, help='Save model to <path>, or resume running from that state')
        parser.add_argument('--corpus', type=str, help='Path to SAM corpus')

        parser.add_argument('-T', '--T', type=int, default=10, help='Number of topics')
        parser.add_argument('--iterations', type=int, default=500, help='Run VEM for <n> iterations')
        parser.add_argument('--write_topic_weights', type=str, help='Write topic weights to <path>')
        parser.add_argument('--write_topics', type=str, help='Write topics to <path>')
        options = parser.parse_args(argv[1:])

        # A brand-new model (path not on disk yet) needs a corpus to build from.
        if not os.path.exists(options.model):
            if options.corpus is None:
                parser.error('Must provide --corpus when creating a new model')
            if not os.path.exists(options.corpus):
                parser.error('Corpus file %s does not exist!' % options.corpus)

        # The model is always an output; the weight/topic dumps only when requested.
        self.add_output_file(options.model)
        for optional_output in (options.write_topic_weights, options.write_topics):
            if optional_output:
                self.add_output_file(optional_output)

        return options
Пример #26
0
def process_command_line(argv=None):
    """Parse CLI arguments for the CME MDP3 pcap parser.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``
                 (added for testability, backward compatible)
    :return: parsed argparse namespace with ``pcapfile``, ``schema`` and
             ``skip_fields`` attributes
    """
    from argparse import ArgumentParser

    # Fix: ArgumentParser(version=...) was removed in Python 3 and raises
    # TypeError; the supported replacement is an explicit --version action.
    parser = ArgumentParser(
        description="Parse a pcap file containing CME MDP3 market data based on a SBE xml schema file.")
    parser.add_argument("--version", action="version", version="%(prog)s 0.1")

    parser.add_argument("pcapfile",
        help="Name of the pcap file to process")

    parser.add_argument("-s", "--schema", default='templates_FixBinary.xml',
        help="Name of the SBE schema xml file")

    default_skip_fields = 'message_size,block_length,template_id,schema_id,version'

    parser.add_argument("-f", "--skip-fields", default=default_skip_fields,
        help="Don't print these message fields (default={})".format(default_skip_fields))

    args = parser.parse_args(argv)

    # check number of arguments, verify values, etc.:
    if not os.path.isfile(args.schema):
        parser.error("sbe schema xml file '{}' not found".format(args.schema))

    return args
Пример #27
0
Файл: bot.py Проект: jreese/edi
def parse_args(argv: List[str] = None) -> object:
    '''Parse and perform basic validation of CLI options.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``
    :return: argparse namespace with ``debug``, ``config`` and ``log``
    '''

    parser = ArgumentParser(description='simple Slack bot')
    parser.add_argument('-D', '--debug', action='store_true', default=False,
                        help='enable debug/verbose output')
    parser.add_argument('--config', type=str, default='config.yaml',
                        metavar='PATH',
                        help='path to configuration file if not in cwd')
    parser.add_argument('--log', type=str, default=None, metavar='PATH',
                        help='path to log program output')

    options = parser.parse_args(argv)

    # Allow --config to point at a directory that contains config.yaml.
    if path.isdir(options.config):
        options.config = path.join(options.config, 'config.yaml')

    if not path.isfile(options.config):
        parser.error('config path "%s" does not exist' % (options.config,))

    # Probe that the log path is appendable before the bot starts.
    # Fix: the original bare `except:` swallowed everything, including
    # KeyboardInterrupt/SystemExit; only filesystem errors belong here.
    if options.log:
        try:
            with open(options.log, 'a'):
                pass
        except OSError:
            parser.error('log path "%s" invalid' % (options.log,))

    return options
Пример #28
0
def query_main(graph, default_index):
    """
    Run a query.

    Parses -i/-q/-f from the command line, validates the query as JSON,
    executes it against the graph's Elasticsearch client, and returns
    either the raw response or just the hit list with --flat.
    """
    parser = ArgumentParser()
    parser.add_argument("-i", "--index", default=default_index)
    parser.add_argument("-q", "--query", default='{"match_all": {}}')
    parser.add_argument("-f", "--flat", action="store_true")
    args = parser.parse_args()

    try:
        parsed_query = loads(args.query)
    except Exception:
        # parser.error() raises SystemExit, so parsed_query is always
        # bound past this point.
        parser.error("query must be valid json")

    response = graph.elasticsearch_client.search(
        index=args.index,
        body=dict(query=parsed_query),
    )

    # --flat strips the Elasticsearch envelope down to the hit list.
    return response["hits"]["hits"] if args.flat else response
Пример #29
0
Файл: warp.py Проект: yous/warp
def main():
    """CLI frontend function.  It takes command line options e.g. host,
    port and provides ``--help`` message.

    """
    parser = ArgumentParser(description='Simple HTTP transparent proxy')
    parser.add_argument('-H', '--host', default='127.0.0.1',
                      help='Host to listen [default: %(default)s]')
    parser.add_argument('-p', '--port', type=int, default=8800,
                      help='Port to listen [default: %(default)d]')
    parser.add_argument('-v', '--verbose', action="count", default=0,
                      help='Print verbose')
    args = parser.parse_args()
    if not (1 <= args.port <= 65535):
        parser.error('port must be 1-65535')
    if args.verbose >= 3:
        parser.error('verbose level must be 1-2')
    if args.verbose >= 1:
        logger.setLevel(logging.DEBUG)
    if args.verbose >= 2:
        logging.getLogger('warp').setLevel(logging.DEBUG)
        logging.getLogger('asyncio').setLevel(logging.DEBUG)
    global verbose
    verbose = args.verbose
    loop = asyncio.get_event_loop()
    try:
        # Fix: asyncio.async() is a SyntaxError on Python 3.7+ (`async`
        # became a keyword); ensure_future() is the drop-in replacement,
        # available since Python 3.4.4.
        asyncio.ensure_future(start_warp_server(args.host, args.port))
        loop.run_forever()
    except KeyboardInterrupt:
        print('bye')
    finally:
        # Always release the loop, even on Ctrl-C.
        loop.close()
Пример #30
0
def main():
    """Parse password-generator options, then print a salted username,
    the generated password/passphrase, and its entropy.

    Any UsageError raised by the generator pipeline is reported through
    parser.error() so the user sees a proper usage message.
    """
    parser = ArgumentParser(description='Password generator')

    parser.add_argument('-n', '--no-numbers',
                        help='Do not use numeric characters in the password',
                        dest='use_nums',
                        action='store_false',
                        default=True)
    parser.add_argument('-s', '--allowed-symbols',
                        help='Permissible symbol characters',
                        dest='kosher_syms',
                        type=str,
                        default=punctuation)
    parser.add_argument('-a', '--no-alpha',
                        help='Do not use letters in the password',
                        dest='use_alpha',
                        action='store_false',
                        default=True)
    parser.add_argument('-l', '--length',
                        help='Minimum password length',
                        dest='length',
                        type=int)
    parser.add_argument('-e', '--entropy',
                        help='Minimum password entropy',
                        dest='entropy',
                        type=float)
    parser.add_argument('-u', '--username',
                        help='Username base',
                        dest='username',
                        default=default_username(),
                        type=str)
    parser.add_argument('-t', '--salt',
                        help='Username salt length',
                        dest='salt_length',
                        type=int,
                        default=4)
    parser.add_argument('-p', '--passphrase',
                        help='Generate a passphrase instead of a password',
                        dest='passphrase',
                        action='store_true',
                        default=False)
    parser.add_argument('-d', '--dictionary',
                        # fixed typo: "alternatate" -> "alternate"
                        help='Supply an alternate file to use as a dictionary',
                        dest='dictionary',
                        type=str,
                        default=DEFAULT_DICTIONARY_FILE)

    options = parser.parse_args(argv[1:])

    try:
        pgen = build_generator(options)
        salted_username = generate_username(pgen, options)
        password, entropy = generate_password(pgen, options)

        print(salted_username)
        print(password + '\n')
        print('password entropy: %.2f bits' % entropy)
    except UsageError as e:
        # Surface generator misuse as a clean CLI usage error.
        parser.error(str(e))
Пример #31
0
def main():
    """
    The primary CLI function for the Topic Explorer.

    Builds the master parser plus one subparser per command, handles the
    '-p STATS_FILE' / subcommand ambiguity manually, optionally wraps the
    selected command in profiling/timing decorators, and dispatches.
    """
    # Create the master argparse object.
    parser = ThrowingArgumentParser()

    # Adding the benchmarks flags.
    benchmark_group = parser.add_mutually_exclusive_group()
    benchmark_group.add_argument('-t',
                                 '--time',
                                 help="Print execution time",
                                 action='store_true')
    benchmark_group.add_argument('-p',
                                 '--profile',
                                 help="""Profile the command.
    Optional filename saves results for use with snakeviz, pstats, or
    cprofilev. Automatically launches snakeviz, if installed.""",
                                 nargs='?',
                                 metavar='STATS_FILE')

    # Using add_subparsers(metavar) until argparse.SUPPRESS support is fixed.
    # See issue http://bugs.python.org/issue22848
    parsers = parser.add_subparsers(
        help="select a command",
        parser_class=ArgumentParser,
        metavar='{version,demo,update,init,prep,train,launch,notebook,metadata}'
    )
    version_parser = parsers.add_parser('version',
                                        help="Print the version and exit")
    version_parser.set_defaults(func='version')

    # Init Parser
    parser_init = parsers.add_parser('init',
                                     help="Initialize the topic explorer")
    init.populate_parser(parser_init)
    parser_init.set_defaults(func="init")

    # Prep Parser
    parser_prep = parsers.add_parser(
        'prep',
        help="Prep the corpus",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    prep.populate_parser(parser_prep)
    parser_prep.set_defaults(func="prep")

    # Train Parser
    parser_train = parsers.add_parser('train', help="Train the LDA models")
    train.populate_parser(parser_train)
    parser_train.set_defaults(func="train")

    # Launch Parser
    parser_launch = parsers.add_parser('launch',
                                       help="Serve the trained LDA models")
    server.populate_parser(parser_launch)
    parser_launch.set_defaults(func="launch")

    # Serve Parser
    parser_serve = parsers.add_parser(
        'serve',
        help="Serve a single LDA model, helper for `topicexplorer launch`," +
        "rarely called directly")
    server.populate_parser(parser_serve)
    parser_serve.set_defaults(func="serve")

    # Notebook Parser
    parser_nb = parsers.add_parser('notebook',
                                   help="Create a set of IPython Notebooks")
    notebook.populate_parser(parser_nb)
    parser_nb.set_defaults(func="notebook")

    # Demo Parser
    parser_demo = parsers.add_parser('demo',
                                     help="Download and run the AP demo")
    parser_demo.set_defaults(func="demo")

    # Update Parser
    parser_update = parsers.add_parser('update',
                                       help="Update the Topic Explorer")
    parser_update.set_defaults(func="update")

    # Metadata Parser
    parser_metadata = parsers.add_parser(
        'metadata', help="Add spaces before unicode chars")
    metadata.populate_parser(parser_metadata)
    parser_metadata.set_defaults(func="metadata")

    # Export Parser
    parser_export = parsers.add_parser('export', help="Export a tez archive")
    export.populate_parser(parser_export)
    parser_export.set_defaults(func="export")

    # Import Parser
    parser_import = parsers.add_parser('import', help="Import the tez archive")
    tezimport.populate_parser(parser_import)
    parser_import.set_defaults(func="import")

    # fancy arg validation for manually injecting tempfile to profile arg
    try:
        try:
            args = parser.parse_args()
        except ArgumentParserError as e:
            import sys
            new_args = sys.argv[1:]
            try:
                # If the error was thrown by the '-p' argument not having a
                # valid file, fix by manually injecting a nargs break
                profile = new_args.index('-p')

                if (len(new_args) > (profile + 1)
                        and new_args[profile + 1] in parsers.choices.keys()):
                    new_args.insert(profile + 1, '-')
                    args = parser.parse_args(new_args)
                else:
                    raise e
            except ValueError:
                raise e
    except ArgumentParserError as e:
        import sys
        # Check to see if error occurs with a subparser and cause the exception
        # to arise from the subparser instead
        for p in parsers.choices.keys():
            if p in sys.argv[1:]:
                subargs_idx = sys.argv.index(p) + 1
                subargs = sys.argv[subargs_idx:]
                subparser = locals()['parser_' + p]
                # this might cause an error in the subparser, in which case
                # we actually want to show that error first
                args = subparser.parse_args(subargs)

        # Use the default error mechanism for the master parser.
        # If the code gets here, it means the error was not in a subparser.
        # Fix: exceptions have no `.message` attribute on Python 3 — using
        # it raised AttributeError and masked the real parse error.
        ArgumentParser.error(parser, str(e))

    if args.profile:
        if args.profile == '-':
            import tempfile
            temphandle, args.profile = tempfile.mkstemp(suffix='.prof',
                                                        prefix='vsm.')
            print("Saving benchmark data to", args.profile)

        from profilehooks import profile

        def benchmark(fn):
            return profile(fn,
                           immediate=True,
                           filename=args.profile,
                           stdout=None)

    elif args.time:
        from profilehooks import timecall

        def benchmark(fn):
            return timecall(fn, immediate=False)
    else:

        def benchmark(fn):
            return fn

    if args.func == 'version':
        from topicexplorer.version import __pretty_version__
        print(__pretty_version__, end='')

    elif args.func == 'init':
        args.config_file = benchmark(init.main)(args)

        print("\nTIP: Only initalizing corpus object and config file.")
        print("     Next prepare the corpus using:")
        print("         topicexplorer prep", args.config_file)
        print("     Or skip directly to training LDA models using:")
        print("         topicexplorer train", args.config_file)

    elif args.func == 'prep':
        benchmark(prep.main)(args)

        print("\nTIP: Train the LDA models with:")
        print("         topicexplorer train", args.config_file)

    elif args.func == 'train':
        benchmark(train.main)(args)

        if not args.dry_run:
            print("\nTIP: launch the topic explorer with:")
            print("         topicexplorer launch", args.config_file)
            print("     or the notebook server with:")
            print("         topicexplorer notebook", args.config_file)

    elif args.func == 'launch' or args.func == 'serve':
        # Note that we are only benchmarking the creation process - obviously
        # benches of the serve process will take longer
        app = benchmark(server.create_app)(args)
        server.main(args, app)

    elif args.func == 'notebook':
        benchmark(notebook.main)(args)

    elif args.func == 'demo':
        benchmark(demo.main)(args)

    elif args.func == 'update':
        benchmark(update.main)(args)

    elif args.func == 'metadata':
        benchmark(metadata.main)(args)

    elif args.func == 'export':
        benchmark(export.main)(args)

    elif args.func == 'import':
        benchmark(tezimport.main)(args)

    if args.profile:
        try:
            import snakeviz.cli
            print("\n\n")
            snakeviz.cli.main([args.profile])
        except ImportError:
            print(
                """\nSnakeviz is not installed. Install with `pip install snakeviz`,
            then run `snakeviz {}`.""".format(args.profile))
Пример #32
0
def main():
    """Entry point."""
    # Track debug mode outside the try so the handler at the bottom can
    # decide whether to print a full traceback.
    debug = False
    try:
        # Reuse the module docstring as the CLI description.
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('-P',
                               '--vidpid',
                               action='append',
                               help='specify a custom VID:PID device ID, '
                               'may be repeated')
        argparser.add_argument('-V',
                               '--virtual',
                               type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        # Each -v lowers the threshold by 10, clamped to [DEBUG, ERROR].
        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            # Verbose format with timestamps and logger names for debugging.
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        # Register any user-supplied VID:PID pairs; a malformed spec is
        # reported as a CLI usage error rather than a crash.
        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        Ftdi.show_devices()

    except (ImportError, IOError, NotImplementedError, ValueError) as exc:
        # Expected failure modes: print a short error; traceback only in -d.
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        # Distinct exit code for user interruption.
        exit(2)
Пример #33
0
def run():

    ########################################################################
    # Argument parsing
    ########################################################################

    # Info
    usage = "AlleyOop utility tools and diagnostics for SLAMSeq data"

    # Main Parsers
    parser = ArgumentParser(description=usage,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    # Initialize Subparsers
    subparsers = parser.add_subparsers(help="", dest="command")

    # dedup command
    dedupparser = subparsers.add_parser(
        'dedup',
        help='Deduplicate SLAM-seq aligned data',
        formatter_class=ArgumentDefaultsHelpFormatter)
    dedupparser.add_argument("-o",
                             "--outputDir",
                             type=str,
                             required=True,
                             dest="outputDir",
                             default=SUPPRESS,
                             help="Output directory for mapped BAM files.")
    dedupparser.add_argument(
        "-tc",
        "--tcMutations",
        type=int,
        required=False,
        default=0,
        dest="tcMutations",
        help="Only select reads with x number of T>C mutations.")
    dedupparser.add_argument("-t",
                             "--threads",
                             type=int,
                             required=False,
                             default=1,
                             dest="threads",
                             help="Thread number")
    dedupparser.add_argument('bam',
                             action='store',
                             help='Bam file(s)',
                             nargs="+")

    # collapse command
    collapseparser = subparsers.add_parser(
        'collapse',
        help='Collapse UTRs',
        formatter_class=ArgumentDefaultsHelpFormatter)
    collapseparser.add_argument("-o",
                                "--outputDir",
                                type=str,
                                required=True,
                                dest="outputDir",
                                default=SUPPRESS,
                                help="Output directory for mapped BAM files.")
    collapseparser.add_argument("-t",
                                "--threads",
                                type=int,
                                required=False,
                                default=1,
                                dest="threads",
                                help="Thread number")
    collapseparser.add_argument('tcount',
                                action='store',
                                help='Tcount file(s)',
                                nargs="+")

    # positional-rates command
    posratesparser = subparsers.add_parser(
        'positional-tracks',
        help='Genome-wide positional tracks as bedgraph',
        formatter_class=ArgumentDefaultsHelpFormatter)
    posratesparser.add_argument('bam',
                                action='store',
                                help='Bam file(s)',
                                nargs="+")
    posratesparser.add_argument("-o",
                                "--outputDir",
                                type=str,
                                required=True,
                                dest="outputDir",
                                default=SUPPRESS,
                                help="Output directory for bedGraph files.")
    posratesparser.add_argument("-s",
                                "--snp-directory",
                                type=str,
                                required=False,
                                dest="snpDir",
                                default=SUPPRESS,
                                help="Directory containing SNP files.")
    posratesparser.add_argument("-r",
                                "--reference",
                                type=str,
                                required=True,
                                dest="ref",
                                default=SUPPRESS,
                                help="Reference fasta file")
    posratesparser.add_argument(
        "-c",
        "--conversion-threshold",
        type=int,
        dest="conversionThreshold",
        required=False,
        default=1,
        help=
        "Number of T>C conversions required to count read as T>C read (default: %(default)d)"
    )
    posratesparser.add_argument(
        "-a",
        "--coverage-cutoff",
        type=int,
        dest="coverageCutoff",
        required=False,
        default=1,
        help=
        "Minimum coverage required to report nucleotide-conversion rate (default: %(default)d). Anything less than 1 will be set to 1 to avoid division by zero."
    )
    posratesparser.add_argument(
        "-q",
        "--min-base-qual",
        type=int,
        default=27,
        required=False,
        dest="minQual",
        help="Min base quality for T -> C conversions (default: %(default)d)")
    posratesparser.add_argument("-t",
                                "--threads",
                                type=int,
                                required=False,
                                default=1,
                                dest="threads",
                                help="Thread number (default: %(default)d)")

    # stats command
    statsparser = subparsers.add_parser(
        'rates',
        help='Calculate overall conversion rates on SLAM-seq datasets',
        formatter_class=ArgumentDefaultsHelpFormatter)
    statsparser.add_argument('bam',
                             action='store',
                             help='Bam file(s)',
                             nargs="+")
    statsparser.add_argument("-o",
                             "--outputDir",
                             type=str,
                             required=True,
                             dest="outputDir",
                             default=SUPPRESS,
                             help="Output directory for mapped BAM files.")
    statsparser.add_argument("-r",
                             "--reference",
                             type=str,
                             required=True,
                             dest="referenceFile",
                             default=SUPPRESS,
                             help="Reference fasta file")
    statsparser.add_argument("-mq",
                             "--min-basequality",
                             type=int,
                             required=False,
                             default=27,
                             dest="mq",
                             help="Minimal base quality for SNPs")
    #statsparser.add_argument('-R', "--compute-rates", dest="overallRates", action='store_true', help="Compute overall conversion rates.")
    statsparser.add_argument("-t",
                             "--threads",
                             type=int,
                             required=False,
                             default=1,
                             dest="threads",
                             help="Thread number")

    # context command
    tccontextparser = subparsers.add_parser(
        'tccontext',
        help='Calculate T->C conversion context on SLAM-seq datasets',
        formatter_class=ArgumentDefaultsHelpFormatter)
    tccontextparser.add_argument('bam',
                                 action='store',
                                 help='Bam file(s)',
                                 nargs="+")
    #tccontextparser.add_argument("-b", "--bed", type=str, required=True, dest="bed", help="BED file")
    tccontextparser.add_argument("-o",
                                 "--outputDir",
                                 type=str,
                                 required=True,
                                 dest="outputDir",
                                 default=SUPPRESS,
                                 help="Output directory for mapped BAM files.")
    tccontextparser.add_argument("-r",
                                 "--reference",
                                 type=str,
                                 required=True,
                                 dest="referenceFile",
                                 default=SUPPRESS,
                                 help="Reference fasta file")
    tccontextparser.add_argument("-mq",
                                 "--min-basequality",
                                 type=int,
                                 required=False,
                                 default=0,
                                 dest="mq",
                                 help="Minimal base quality for SNPs")
    tccontextparser.add_argument("-t",
                                 "--threads",
                                 type=int,
                                 required=False,
                                 default=1,
                                 dest="threads",
                                 help="Thread number")

    # stats rates utr command
    statsutrrateparser = subparsers.add_parser(
        'utrrates',
        help='Calculate conversion rates per UTR on SLAM-seq datasets')
    statsutrrateparser.add_argument('bam',
                                    action='store',
                                    help='Bam file(s)',
                                    nargs="+")
    statsutrrateparser.add_argument(
        "-o",
        "--outputDir",
        type=str,
        required=True,
        dest="outputDir",
        help="Output directory for mapped BAM files.")
    statsutrrateparser.add_argument("-r",
                                    "--reference",
                                    type=str,
                                    required=True,
                                    dest="referenceFile",
                                    help="Reference fasta file")
    statsutrrateparser.add_argument(
        "-mq",
        "--min-basequality",
        type=int,
        required=False,
        default=27,
        dest="mq",
        help="Minimal base quality for SNPs (default: %(default)s)")
    statsutrrateparser.add_argument("-m",
                                    "--multiTCStringency",
                                    dest="strictTCs",
                                    action='store_true',
                                    required=False,
                                    help="")
    statsutrrateparser.add_argument(
        "-t",
        "--threads",
        type=int,
        required=False,
        default=1,
        dest="threads",
        help="Thread number (default: %(default)s)")
    statsutrrateparser.add_argument("-b",
                                    "--bed",
                                    type=str,
                                    required=True,
                                    dest="bed",
                                    help="BED file")
    statsutrrateparser.add_argument(
        "-l",
        "--max-read-length",
        type=int,
        required=False,
        dest="maxLength",
        help="Max read length in BAM file (default: %(default)s)")

    # SNPeval command
    snpevalparser = subparsers.add_parser('snpeval',
                                          help='Evaluate SNP calling')
    snpevalparser.add_argument('bam',
                               action='store',
                               help='Bam file(s)',
                               nargs="+")
    snpevalparser.add_argument("-o",
                               "--outputDir",
                               type=str,
                               required=True,
                               dest="outputDir",
                               help="Output directory for mapped BAM files.")
    snpevalparser.add_argument("-s",
                               "--snp-directory",
                               type=str,
                               required=True,
                               dest="snpDir",
                               help="Directory containing SNP files.")
    snpevalparser.add_argument("-r",
                               "--reference",
                               type=str,
                               required=True,
                               dest="ref",
                               help="Reference fasta file")
    snpevalparser.add_argument("-b",
                               "--bed",
                               type=str,
                               required=True,
                               dest="bed",
                               help="BED file")
    snpevalparser.add_argument(
        "-c",
        "--min-coverage",
        required=False,
        dest="cov",
        type=int,
        help="Minimum coverage to call variant (default: %(default)s)",
        default=10)
    snpevalparser.add_argument(
        "-f",
        "--var-fraction",
        required=False,
        dest="var",
        type=float,
        help="Minimum variant fraction to call variant (default: %(default)s)",
        default=0.8)
    snpevalparser.add_argument("-m",
                               "--multiTCStringency",
                               dest="strictTCs",
                               action='store_true',
                               required=False,
                               help="")
    snpevalparser.add_argument(
        "-l",
        "--max-read-length",
        type=int,
        required=False,
        dest="maxLength",
        help="Max read length in BAM file (default: %(default)s)")
    snpevalparser.add_argument(
        "-q",
        "--min-base-qual",
        type=int,
        default=27,
        required=False,
        dest="minQual",
        help="Min base quality for T -> C conversions (default: %(default)s)")
    snpevalparser.add_argument("-t",
                               "--threads",
                               type=int,
                               required=False,
                               default=1,
                               dest="threads",
                               help="Thread number (default: %(default)s)")

    # stats summary command
    statsSumParser = subparsers.add_parser(
        'summary',
        help='Display summary information and statistics on read numbers')
    statsSumParser.add_argument(
        'bam',
        action='store',
        help='Filtered BAM files (produced by slamdunk filter or all)',
        nargs="+")
    statsSumParser.add_argument("-o",
                                "--output",
                                type=str,
                                required=True,
                                dest="outputFile",
                                help="Output file")
    statsSumParser.add_argument("-t",
                                "--tcountDir",
                                type=str,
                                required=False,
                                dest="countDirectory",
                                help="Folder containing tcount files")

    # merge command
    statsMergeParser = subparsers.add_parser(
        'merge',
        help='Merge T->C rates from multiple sample in one TSV file',
        formatter_class=ArgumentDefaultsHelpFormatter)
    statsMergeParser.add_argument('countFiles',
                                  action='store',
                                  help='tCount files',
                                  nargs="+")
    statsMergeParser.add_argument("-o",
                                  "--output",
                                  type=str,
                                  required=True,
                                  dest="outputFile",
                                  default=SUPPRESS,
                                  help="Output file")
    statsMergeParser.add_argument(
        '-c',
        "--column",
        dest="column",
        type=str,
        required=False,
        default="TcReadCount / ReadCount",
        help="Column or expression used to summarize files.")
    statsMergeParser.add_argument(
        '-n',
        "--columnname",
        dest="columnName",
        type=int,
        required=False,
        default=2,
        help="Index of meta data field to use as column name.")

    # stats read info command
    conversionRateParser = subparsers.add_parser(
        'tcperreadpos',
        help='Calculate conversion rates per read position on SLAM-seq datasets'
    )
    conversionRateParser.add_argument('bam',
                                      action='store',
                                      help='Bam file(s)',
                                      nargs="+")
    conversionRateParser.add_argument("-r",
                                      "--reference",
                                      type=str,
                                      required=True,
                                      dest="referenceFile",
                                      help="Reference fasta file")
    conversionRateParser.add_argument("-s",
                                      "--snp-directory",
                                      type=str,
                                      required=False,
                                      dest="snpDir",
                                      help="Directory containing SNP files.")
    conversionRateParser.add_argument("-l",
                                      "--max-read-length",
                                      type=int,
                                      required=False,
                                      dest="maxLength",
                                      help="Max read length in BAM file")
    conversionRateParser.add_argument(
        "-o",
        "--outputDir",
        type=str,
        required=True,
        dest="outputDir",
        help="Output directory for mapped BAM files."
    )  #conversionRateParser.add_argument("-5", "--trim-5p", type=int, required=False, dest="trim5", help="Number of bp removed from 5' end of all reads.")
    conversionRateParser.add_argument(
        "-mq",
        "--min-basequality",
        type=int,
        required=False,
        default=27,
        dest="mq",
        help="Minimal base quality for SNPs (default: %(default)s)")
    conversionRateParser.add_argument(
        "-t",
        "--threads",
        type=int,
        required=False,
        dest="threads",
        default=1,
        help="Thread number (default: %(default)s)")

    # stats utr info command
    utrRateParser = subparsers.add_parser(
        'tcperutrpos',
        help='Calculate conversion rates per UTR position on SLAM-seq datasets'
    )
    utrRateParser.add_argument('bam',
                               action='store',
                               help='Bam file(s)',
                               nargs="+")
    utrRateParser.add_argument("-r",
                               "--reference",
                               type=str,
                               required=True,
                               dest="referenceFile",
                               help="Reference fasta file")
    utrRateParser.add_argument("-b",
                               "--bed",
                               type=str,
                               required=True,
                               dest="bed",
                               help="BED file")
    utrRateParser.add_argument("-s",
                               "--snp-directory",
                               type=str,
                               required=False,
                               dest="snpDir",
                               help="Directory containing SNP files.")
    utrRateParser.add_argument("-l",
                               "--max-read-length",
                               type=int,
                               required=False,
                               dest="maxLength",
                               help="Max read length in BAM file")
    utrRateParser.add_argument(
        "-o",
        "--outputDir",
        type=str,
        required=True,
        dest="outputDir",
        help="Output directory for mapped BAM files."
    )  #conversionRateParser.add_argument("-5", "--trim-5p", type=int, required=False, dest="trim5", help="Number of bp removed from 5' end of all reads.")
    utrRateParser.add_argument(
        "-mq",
        "--min-basequality",
        type=int,
        required=False,
        default=27,
        dest="mq",
        help="Minimal base quality for SNPs (default: %(default)s)")
    utrRateParser.add_argument("-t",
                               "--threads",
                               type=int,
                               required=False,
                               dest="threads",
                               default=1,
                               help="Thread number (default: %(default)s)")

    # dump read info command
    dumpReadInfo = subparsers.add_parser(
        'dump',
        help='Print all info available for slamdunk reads',
        formatter_class=ArgumentDefaultsHelpFormatter)
    dumpReadInfo.add_argument('bam',
                              action='store',
                              help='Bam file(s)',
                              nargs="+")
    dumpReadInfo.add_argument("-r",
                              "--reference",
                              type=str,
                              required=True,
                              dest="referenceFile",
                              default=SUPPRESS,
                              help="Reference fasta file")
    dumpReadInfo.add_argument("-s",
                              "--snp-directory",
                              type=str,
                              required=True,
                              dest="snpDir",
                              default=SUPPRESS,
                              help="Directory containing SNP files.")
    dumpReadInfo.add_argument(
        "-o",
        "--outputDir",
        type=str,
        required=True,
        dest="outputDir",
        default=SUPPRESS,
        help="Output directory for mapped BAM files."
    )  #conversionRateParser.add_argument("-5", "--trim-5p", type=int, required=False, dest="trim5", help="Number of bp removed from 5' end of all reads.")
    dumpReadInfo.add_argument("-mq",
                              "--min-basequality",
                              type=int,
                              required=False,
                              default=0,
                              dest="mq",
                              help="Minimal base quality for SNPs")
    dumpReadInfo.add_argument("-t",
                              "--threads",
                              type=int,
                              required=False,
                              dest="threads",
                              default=1,
                              help="Thread number")

    args = parser.parse_args()

    ########################################################################
    # Routine selection
    ########################################################################

    command = args.command

    if (command == "dedup"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        tcMutations = args.tcMutations
        message("Running alleyoop dedup for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runDedup)(tid, args.bam[tid], outputDirectory, tcMutations)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "collapse"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        message("Running alleyoop collapse for " + str(len(args.tcount)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runCollapse)(tid, args.tcount[tid], outputDirectory)
            for tid in range(0, len(args.tcount)))
        dunkFinished()

    elif (command == "positional-tracks"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        snpDirectory = args.snpDir
        n = args.threads
        message("Running alleyoop positional-tracks for " +
                str(len(args.bam)) + " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n,
                           verbose=verbose)(delayed(runPositionalRates)(
                               tid, args.bam[tid], args.ref, args.minQual,
                               args.conversionThreshold, args.coverageCutoff,
                               outputDirectory, snpDirectory)
                                            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "half-lifes"):

        outputDirectory = args.outputDir
        createDir(outputDirectory)

        timepoints = args.timepoints

        message("Running alleyoop half-lifes for " + str(len(args.bam)) +
                " files")
        runHalfLifes(args.bam, timepoints, outputDirectory)
        dunkFinished()

    elif (command == "rates"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        referenceFile = args.referenceFile
        minMQ = args.mq
        message("Running alleyoop rates for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runStatsRates)(tid, args.bam[tid], referenceFile, minMQ,
                                   outputDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "snpeval"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        snpDirectory = args.snpDir
        n = args.threads
        message("Running alleyoop SNPeval for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runSNPeval)(tid, args.bam[tid], args.ref, args.bed, args.
                                maxLength, args.minQual, args.cov, args.var,
                                args.strictTCs, outputDirectory, snpDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "tccontext"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        referenceFile = args.referenceFile
        minMQ = args.mq
        message("Running alleyoop TC context for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runStatsTCContext)(tid, args.bam[tid], referenceFile,
                                       minMQ, outputDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "utrrates"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        referenceFile = args.referenceFile
        minMQ = args.mq

        message("Running alleyoop utrrates for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runStatsRatesUTR)(tid, args.bam[tid], referenceFile, minMQ,
                                      args.strictTCs, outputDirectory,
                                      args.bed, args.maxLength)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "summary"):
        message("Running alleyoop summary for " + str(len(args.bam)) +
                " files")
        runSummary(args.bam, args.outputFile, args.countDirectory)
        dunkFinished()

    elif (command == "merge"):
        message("Running alleyoop merge for " + str(len(args.countFiles)) +
                " files")
        outputLog = replaceExtension(args.outputFile, ".log")
        stats.mergeRates(",".join(args.countFiles), args.outputFile,
                         args.column, args.columnName, getLogFile(outputLog))
        dunkFinished()

    elif (command == "tcperreadpos"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        snpDirectory = args.snpDir
        referenceFile = args.referenceFile
        minMQ = args.mq
        message("Running alleyoop tcperreadpos for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(delayed(runTcPerReadPos)(
            tid, args.bam[tid], referenceFile, minMQ, args.maxLength,
            outputDirectory, snpDirectory) for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "tcperutrpos"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        snpDirectory = args.snpDir
        referenceFile = args.referenceFile
        minMQ = args.mq
        snpDirectory = args.snpDir
        message("Running alleyoop tcperutrpos for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(delayed(runTcPerUtr)(
            tid, args.bam[tid], referenceFile, args.bed, minMQ, args.maxLength,
            outputDirectory, snpDirectory) for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "dump"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        snpDirectory = args.snpDir
        referenceFile = args.referenceFile
        minMQ = args.mq
        message("Running alleyoop dump for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runDumpReadInfo)(tid, args.bam[tid], referenceFile, minMQ,
                                     outputDirectory, snpDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    else:
        parser.error("Too few arguments.")
# Example #34 (paste-site scrape marker; commented out — the bare
# identifier `Пример` and the stray `0` would otherwise execute at
# module level and raise NameError on import)
def run():
    ########################################################################
    # Argument parsing
    ########################################################################

    # Info
    usage = "SLAMdunk software for analyzing SLAM-seq data"

    # Main Parsers
    parser = ArgumentParser(description=usage,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    # Initialize Subparsers
    subparsers = parser.add_subparsers(help="", dest="command")

    # map command

    mapparser = subparsers.add_parser(
        'map',
        help='Map SLAM-seq read data',
        formatter_class=ArgumentDefaultsHelpFormatter)
    mapparser.add_argument(
        'files',
        action='store',
        help=
        'Single csv/tsv file (recommended) containing all sample files and sample info or a list of all sample BAM/FASTA(gz)/FASTQ(gz) files',
        nargs="+")
    mapparser.add_argument("-r",
                           "--reference",
                           type=str,
                           required=True,
                           dest="referenceFile",
                           default=SUPPRESS,
                           help="Reference fasta file")
    mapparser.add_argument("-o",
                           "--outputDir",
                           type=str,
                           required=True,
                           dest="outputDir",
                           default=SUPPRESS,
                           help="Output directory for mapped BAM files.")
    mapparser.add_argument(
        "-5",
        "--trim-5p",
        type=int,
        required=False,
        dest="trim5",
        default=12,
        help="Number of bp removed from 5' end of all reads.")
    mapparser.add_argument("-n",
                           "--topn",
                           type=int,
                           required=False,
                           dest="topn",
                           default=1,
                           help="Max. number of alignments to report per read")
    mapparser.add_argument("-a",
                           "--max-polya",
                           type=int,
                           required=False,
                           dest="maxPolyA",
                           default=4,
                           help="Max number of As at the 3' end of a read.")
    mapparser.add_argument("-t",
                           "--threads",
                           type=int,
                           required=False,
                           dest="threads",
                           default=1,
                           help="Thread number")
    mapparser.add_argument(
        "-q",
        "--quantseq",
        dest="quantseq",
        action='store_true',
        required=False,
        help="Run plain Quantseq alignment without SLAM-seq scoring")
    mapparser.add_argument(
        '-e',
        "--endtoend",
        action='store_true',
        dest="endtoend",
        help="Use a end to end alignment algorithm for mapping.")
    mapparser.add_argument(
        "-i",
        "--sample-index",
        type=int,
        required=False,
        default=-1,
        dest="sampleIndex",
        help=
        "Run analysis only for sample <i>. Use for distributing slamdunk analysis on a cluster (index is 1-based)."
    )
    mapparser.add_argument(
        '-ss',
        "--skip-sam",
        action='store_true',
        dest="skipSAM",
        help="Output BAM while mapping. Slower but, uses less hard disk.")

    # filter command

    filterparser = subparsers.add_parser('filter',
                                         help='Filter SLAM-seq aligned data')
    filterparser.add_argument('bam',
                              action='store',
                              help='Bam file(s)',
                              nargs="+")
    filterparser.add_argument("-o",
                              "--outputDir",
                              type=str,
                              required=True,
                              dest="outputDir",
                              help="Output directory for mapped BAM files.")
    filterparser.add_argument("-b",
                              "--bed",
                              type=str,
                              required=False,
                              dest="bed",
                              help="BED file, overrides MQ filter to 0")
    filterparser.add_argument(
        "-mq",
        "--min-mq",
        type=int,
        required=False,
        default=2,
        dest="mq",
        help="Minimum mapping quality (default: %(default)d)")
    filterparser.add_argument(
        "-mi",
        "--min-identity",
        type=float,
        required=False,
        default=0.95,
        dest="identity",
        help="Minimum alignment identity (default: %(default)s)")
    filterparser.add_argument(
        "-nm",
        "--max-nm",
        type=int,
        required=False,
        default=-1,
        dest="nm",
        help="Maximum NM for alignments (default: %(default)d)")
    filterparser.add_argument("-t",
                              "--threads",
                              type=int,
                              required=False,
                              dest="threads",
                              default=1,
                              help="Thread number (default: %(default)d)")

    # snp command

    snpparser = subparsers.add_parser(
        'snp',
        help='Call SNPs on SLAM-seq aligned data',
        formatter_class=ArgumentDefaultsHelpFormatter)
    snpparser.add_argument('bam',
                           action='store',
                           help='Bam file(s)',
                           nargs="+")
    snpparser.add_argument("-o",
                           "--outputDir",
                           type=str,
                           required=True,
                           dest="outputDir",
                           default=SUPPRESS,
                           help="Output directory for mapped BAM files.")
    snpparser.add_argument("-r",
                           "--reference",
                           required=True,
                           dest="fasta",
                           type=str,
                           default=SUPPRESS,
                           help="Reference fasta file")
    snpparser.add_argument("-c",
                           "--min-coverage",
                           required=False,
                           dest="cov",
                           type=int,
                           help="Minimimum coverage to call variant",
                           default=10)
    #snpparser.add_argument("-q", "--min-base-qual", type=int, default=13, required=False, dest="minQual", help="Min base quality for T -> C conversions (default: %(default)d)")
    snpparser.add_argument("-f",
                           "--var-fraction",
                           required=False,
                           dest="var",
                           type=float,
                           help="Minimimum variant fraction to call variant",
                           default=0.8)
    snpparser.add_argument("-t",
                           "--threads",
                           type=int,
                           required=False,
                           default=1,
                           dest="threads",
                           help="Thread number")

    # count command

    countparser = subparsers.add_parser(
        'count', help='Count T/C conversions in SLAM-seq aligned data')
    countparser.add_argument('bam',
                             action='store',
                             help='Bam file(s)',
                             nargs="+")
    countparser.add_argument("-o",
                             "--outputDir",
                             type=str,
                             required=True,
                             dest="outputDir",
                             default=SUPPRESS,
                             help="Output directory for mapped BAM files.")
    countparser.add_argument("-s",
                             "--snp-directory",
                             type=str,
                             required=False,
                             dest="snpDir",
                             default=SUPPRESS,
                             help="Directory containing SNP files.")
    countparser.add_argument("-r",
                             "--reference",
                             type=str,
                             required=True,
                             dest="ref",
                             default=SUPPRESS,
                             help="Reference fasta file")
    countparser.add_argument("-b",
                             "--bed",
                             type=str,
                             required=True,
                             dest="bed",
                             default=SUPPRESS,
                             help="BED file")
    countparser.add_argument(
        "-c",
        "--conversion-threshold",
        type=int,
        dest="conversionThreshold",
        required=False,
        default=1,
        help=
        "Number of T>C conversions required to count read as T>C read (default: %(default)d)"
    )
    countparser.add_argument("-l",
                             "--max-read-length",
                             type=int,
                             required=False,
                             dest="maxLength",
                             help="Max read length in BAM file")
    countparser.add_argument(
        "-q",
        "--min-base-qual",
        type=int,
        default=27,
        required=False,
        dest="minQual",
        help="Min base quality for T -> C conversions (default: %(default)d)")
    countparser.add_argument("-t",
                             "--threads",
                             type=int,
                             required=False,
                             default=1,
                             dest="threads",
                             help="Thread number (default: %(default)d)")

    # all command

    allparser = subparsers.add_parser('all',
                                      help='Run entire SLAMdunk analysis')
    allparser.add_argument(
        'files',
        action='store',
        help=
        'Single csv/tsv file (recommended) containing all sample files and sample info or a list of all sample BAM/FASTA(gz)/FASTQ(gz) files',
        nargs="+")
    allparser.add_argument("-r",
                           "--reference",
                           type=str,
                           required=True,
                           dest="referenceFile",
                           help="Reference fasta file")
    allparser.add_argument("-b",
                           "--bed",
                           type=str,
                           required=True,
                           dest="bed",
                           help="BED file with 3'UTR coordinates")
    allparser.add_argument(
        "-fb",
        "--filterbed",
        type=str,
        required=False,
        dest="filterbed",
        help=
        "BED file with 3'UTR coordinates to filter multimappers (activates -m)"
    )
    allparser.add_argument("-o",
                           "--outputDir",
                           type=str,
                           required=True,
                           dest="outputDir",
                           help="Output directory for slamdunk run.")
    allparser.add_argument(
        "-5",
        "--trim-5p",
        type=int,
        required=False,
        dest="trim5",
        default=12,
        help=
        "Number of bp removed from 5' end of all reads (default: %(default)s)")
    allparser.add_argument(
        "-a",
        "--max-polya",
        type=int,
        required=False,
        dest="maxPolyA",
        default=4,
        help="Max number of As at the 3' end of a read (default: %(default)s)")
    allparser.add_argument(
        "-n",
        "--topn",
        type=int,
        required=False,
        dest="topn",
        default=1,
        help=
        "Max. number of alignments to report per read (default: %(default)s)")
    allparser.add_argument("-t",
                           "--threads",
                           type=int,
                           required=False,
                           default=1,
                           dest="threads",
                           help="Thread number (default: %(default)s)")
    allparser.add_argument(
        "-q",
        "--quantseq",
        dest="quantseq",
        action='store_true',
        required=False,
        help="Run plain Quantseq alignment without SLAM-seq scoring")
    allparser.add_argument(
        '-e',
        "--endtoend",
        action='store_true',
        dest="endtoend",
        help="Use a end to end alignment algorithm for mapping.")
    allparser.add_argument(
        '-m',
        "--multimap",
        action='store_true',
        dest="multimap",
        help="Use reference to resolve multimappers (requires -n > 1).")
    allparser.add_argument(
        "-mq",
        "--min-mq",
        type=int,
        required=False,
        default=2,
        dest="mq",
        help="Minimum mapping quality (default: %(default)s)")
    allparser.add_argument(
        "-mi",
        "--min-identity",
        type=float,
        required=False,
        default=0.95,
        dest="identity",
        help="Minimum alignment identity (default: %(default)s)")
    allparser.add_argument(
        "-nm",
        "--max-nm",
        type=int,
        required=False,
        default=-1,
        dest="nm",
        help="Maximum NM for alignments (default: %(default)s)")
    allparser.add_argument(
        "-mc",
        "--min-coverage",
        required=False,
        dest="cov",
        type=int,
        help="Minimimum coverage to call variant (default: %(default)s)",
        default=10)
    allparser.add_argument(
        "-mv",
        "--var-fraction",
        required=False,
        dest="var",
        type=float,
        help=
        "Minimimum variant fraction to call variant (default: %(default)s)",
        default=0.8)
    allparser.add_argument(
        "-c",
        "--conversion-threshold",
        type=int,
        dest="conversionThreshold",
        required=False,
        default=1,
        help=
        "Number of T>C conversions required to count read as T>C read (default: %(default)d)"
    )
    allparser.add_argument("-rl",
                           "--max-read-length",
                           type=int,
                           required=False,
                           dest="maxLength",
                           help="Max read length in BAM file")
    allparser.add_argument(
        "-mbq",
        "--min-base-qual",
        type=int,
        default=27,
        required=False,
        dest="minQual",
        help="Min base quality for T -> C conversions (default: %(default)d)")
    allparser.add_argument(
        "-i",
        "--sample-index",
        type=int,
        required=False,
        default=-1,
        dest="sampleIndex",
        help=
        "Run analysis only for sample <i>. Use for distributing slamdunk analysis on a cluster (index is 1-based)."
    )
    allparser.add_argument(
        "-ss",
        "--skip-sam",
        action='store_true',
        dest="skipSAM",
        help="Output BAM while mapping. Slower but, uses less hard disk.")

    args = parser.parse_args()

    ########################################################################
    # Routine selection
    ########################################################################

    command = args.command

    if (command == "map"):
        mapper.checkNextGenMapVersion()

        outputDirectory = args.outputDir

        if args.sampleIndex > -1:
            sec = random.randrange(0, 2000) / 1000
            message("Waiting " + str(sec) + " seconds")
            sleep(sec)

        createDir(outputDirectory)
        n = args.threads
        referenceFile = args.referenceFile

        samples, samplesInfos = getSamples(args.files,
                                           runOnly=args.sampleIndex)

        message("Running slamDunk map for " + str(len(samples)) + " files (" +
                str(n) + " threads)")
        for i in xrange(0, len(samples)):
            bam = samples[i]
            sampleInfo = samplesInfos[i]
            tid = i
            if args.sampleIndex > -1:
                tid = args.sampleIndex
            runMap(tid, bam, referenceFile, n, args.trim5, args.maxPolyA,
                   args.quantseq, args.endtoend, args.topn, sampleInfo,
                   outputDirectory, args.skipSAM)

        dunkFinished()

        if not args.skipSAM:
            message("Running slamDunk sam2bam for " + str(len(samples)) +
                    " files (" + str(n) + " threads)")
            results = Parallel(n_jobs=1, verbose=verbose)(
                delayed(runSam2Bam)(tid, samples[tid], n, outputDirectory)
                for tid in range(0, len(samples)))
            dunkFinished()

    elif (command == "filter"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        n = args.threads
        message("Running slamDunk filter for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runFilter)(tid, args.bam[tid], args.bed, args.mq,
                               args.identity, args.nm, outputDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "snp"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        fasta = args.fasta
        minCov = args.cov
        minVarFreq = args.var
        #minQual = args.minQual
        minQual = 15
        n = args.threads
        if (n > 1):
            n = n / 2
        message("Running slamDunk SNP for " + str(len(args.bam)) + " files (" +
                str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runSnp)(tid, fasta, minCov, minVarFreq, minQual,
                            args.bam[tid], outputDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "count"):
        outputDirectory = args.outputDir
        createDir(outputDirectory)
        snpDirectory = args.snpDir
        n = args.threads
        message("Running slamDunk tcount for " + str(len(args.bam)) +
                " files (" + str(n) + " threads)")
        results = Parallel(n_jobs=n, verbose=verbose)(
            delayed(runCount)
            (tid, args.bam[tid], args.ref, args.bed, args.maxLength, args.
             minQual, args.conversionThreshold, outputDirectory, snpDirectory)
            for tid in range(0, len(args.bam)))
        dunkFinished()

    elif (command == "all"):
        runAll(args)
        dunkFinished()

    else:
        parser.error("Too few arguments.")
Пример #35
0
def main():
    """Run the requested window-operation test(s).

    Returns 0 when everything passed, otherwise the exit status of the
    last test that failed.
    """
    # Registry of available tests; keys double as the valid --test values.
    tests = {
        'open-close': print_open_close,
        'suspend-resume': print_suspend_resume,
        'open-close-multi': print_open_close_multi,
        'move': print_move_window
    }

    parser = ArgumentParser(
        description='Script that performs window operation')
    parser.add_argument('-t',
                        '--test',
                        default='all',
                        help='The name of the test to run. \
                              Available tests: \
                              %s, all. \
                              Default is all' % (', '.join(tests)))
    parser.add_argument('-i',
                        '--iterations',
                        type=int,
                        default=1,
                        help='The number of times to run the test. \
                              Default is 1')
    parser.add_argument('-a',
                        '--application',
                        default='glxgears',
                        help='The 3D application to launch. \
                              Default is "glxgears"')
    parser.add_argument('-to',
                        '--timeout',
                        type=int,
                        default=3,
                        help='The time in seconds between each test. \
                              Default is 3')
    parser.add_argument('-w', '--windows-number', type=int, default=4,
                        help='The number of windows to open.')

    args = parser.parse_args()

    # A recognized test name runs on its own.
    chosen = tests.get(args.test)
    if chosen is not None:
        return chosen(args.iterations, args.timeout, args.windows_number)

    # Anything other than the special value 'all' is a usage error.
    if args.test != 'all':
        parser.error('-t or --test can only be used with one '
                     'of the following tests: '
                     '%s, all' % (', '.join(tests)))

    # Run every registered test; keep the last non-zero status.
    status = 0
    for name in tests:
        exit_status = tests[name](args.iterations, args.timeout,
                                  args.windows_number)
        if exit_status != 0:
            status = exit_status
    return status
Пример #36
0
def post_add_arguments(config: dict, arguments: argparse.Namespace, parser: argparse.ArgumentParser):
    """Validate and normalize parsed CLI arguments.

    Handles early-exit actions (--clean, --version, unset, --config, --kill)
    by raising ``EarlyExit``, validates SAML-related flag combinations, and
    expands the short-hand ``<partition>:<account_id>:<name>`` syntax of
    ``--role-arn`` / ``--principal-arn`` into full ARNs. Finally derives
    ``arguments.target_profile_name``.

    Raises
    ------
    exceptions.ValidationException
        For invalid autoawsume flag combinations.
    exceptions.EarlyExit
        When an early-exit action was requested and handled.
    """
    logger.debug('Post add arguments')
    logger.debug(json.dumps(vars(arguments)))
    if arguments.auto_refresh:
        if arguments.role_arn:
            raise exceptions.ValidationException('Cannot use autoawsume with a given role_arn')
        if arguments.json:
            raise exceptions.ValidationException('Cannot use autoawsume with json')
    if arguments.clean:
        _, credentials_file = aws_files_lib.get_aws_files(arguments, config)
        aws_files_lib.remove_expired_output_profiles(credentials_file)
        raise exceptions.EarlyExit()
    if arguments.version:
        logger.debug('Logging version')
        safe_print(__data__.version)
        raise exceptions.EarlyExit()
    if arguments.unset_variables:
        logger.debug('Unsetting environment variables')
        print('Unset', [])
        raise exceptions.EarlyExit()
    if type(arguments.config) is list:
        config_lib.handle_config(arguments.config)
        raise exceptions.EarlyExit()
    if arguments.kill:
        kill(arguments)
        raise exceptions.EarlyExit()

    # SAML needs both ARNs or neither; --principal-arn is SAML-only.
    if arguments.with_saml:
        if bool(arguments.role_arn) is not bool(arguments.principal_arn):
            parser.error('both or neither --principal-arn and --role-arn must be specified with saml')
    if not arguments.with_saml and arguments.principal_arn:
        parser.error('--principal-arn can only be specified with --with-saml')

    if arguments.role_arn and not arguments.role_arn.startswith('arn:'):
        logger.debug('Using short-hand role arn syntax')
        parts = arguments.role_arn.split(':')
        if len(parts) == 2:
            partition = 'aws'
            account_id = parts[0]
            role_name = parts[1]
        elif len(parts) == 3:
            partition = parts[0]
            account_id = parts[1]
            role_name = parts[2]
        else:
            parser.error('--role-arn must be a valid role arn or follow the format "<partition>:<account_id>:<role_name>"')
        if not account_id.isnumeric() or len(account_id) != 12:
            parser.error('--role-arn account id must be valid numeric account id of length 12')
        arguments.role_arn = 'arn:{}:iam::{}:role/{}'.format(partition, account_id, role_name)

    if arguments.principal_arn and not arguments.principal_arn.startswith('arn:'):
        logger.debug('Using short-hand role arn syntax')
        parts = arguments.principal_arn.split(':')
        if len(parts) == 2:
            partition = 'aws'
            account_id = parts[0]
            provider_name = parts[1]
        elif len(parts) == 3:
            partition = parts[0]
            account_id = parts[1]
            provider_name = parts[2]
        else:
            parser.error('--principal-arn must be a valid role arn or follow the format "<partition>:<account_id>:<provider_name>"')
        # Fix: validate the account id (as the message says), not the provider
        # name — the previous check mirrored the wrong variable.
        if not account_id.isnumeric() or len(account_id) != 12:
            parser.error('--principal-arn account id must be valid numeric account id of length 12')
        # Fix: a SAML principal ARN uses the saml-provider resource type,
        # not role/.
        arguments.principal_arn = 'arn:{}:iam::{}:saml-provider/{}'.format(partition, account_id, provider_name)

    if not arguments.profile_name:
        if arguments.role_arn:
            logger.debug('Role arn passed, target profile name will be role_arn')
            arguments.target_profile_name = arguments.role_arn
        else:
            logger.debug('No profile name passed, target profile name will be "default"')
            arguments.target_profile_name = 'default'
    else:
        arguments.target_profile_name = arguments.profile_name
band_name, stage, concert_start_time, category, concert_length\n
There can be multiple instance of table and col.
cols must be grouped under the correct table"""
    arg_parser = ArgumentParser(description='Bla bla bla',
                                formatter_class=RawTextHelpFormatter)
    arg_parser.add_argument("-y", "--year", dest="year", metavar="INTEGER",
                            help="Scrape specific year")
    arg_parser.add_argument("-o", "--output", dest="output",
                            choices=["file", "stdout", "database"],
                            help="Choose how to output the scraped data")
    arg_parser.add_argument("-dbinfo", "--database_info", metavar="FILE",
                            dest="dbinfo", help=dbinfo_help_text)

    args = arg_parser.parse_args()
    if args.output == "database" and args.dbinfo is None:
        arg_parser.error("Missing database information file")
    print("############### Roskilde band scraper - log {} ##############".
          format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    rfbs = RfBandScraping(year)
    d = DatabaseHelper(rfbs.current_year)
    d.insert_update_categories(rfbs.categories)

    #rfbs.get_music_as_list()
    rfbs.extract_bands2()
    # rfbs.spilletime_leg()
    # sys.exit()

    # rfbs.spilletime_leg()
    #rfbs.get_category()
    print("-------------------- Result: ---------------------------")
    pprint.pprint(rfbs.bands)
Пример #38
0
def get_bng_calc_arguments(argv=None):
    """
    Parse command-line arguments with :class:`~argparse.ArgumentParser`.

    This function is used for data processing on-the-fly (requires web connection)

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; ``None`` falls back to ``sys.argv[1:]``.

    Returns
    -------
    args : :class:`~argparse.Namespace`
        Parsed, validated and normalized arguments (dates converted to
        ``UTCDateTime``, comma-separated lists split, etc.).
    """

    parser = ArgumentParser(
        usage="%(prog)s [arguments] <Station Database>",
        description="Program to compute the orientation of the components " +
        "of a station based on those in a station database.")
    parser.add_argument(
        "indb",
        help="Station Database to process from.",
        type=str)
    parser.add_argument(
        "-v", "--verbose",
        default=2,
        type=int,
        dest="verb",
        help="Enable Level of verbose output during processing. " +
        "(0) No Output; (1) Output Event Analysis counter; " +
        "(2) Counter and results. Default 2")
    parser.add_argument(
        "-O", "--overwrite",
        default=False,
        action="store_true",
        dest="ovr",
        help="Overwrite existing data on disk. [Default False]")
    parser.add_argument(
        "--save-location",
        default="BNG_RESULTS",
        type=str,
        dest="saveloc",
        help="Specify Save destination. Default is BNG_RESULTS " +
        "(and sub-directories based on Station Name).")
    parser.add_argument(
        "--no-save-progress",
        default=True,
        action="store_false",
        dest="constsave",
        help="Do not save progress during processing.")

    # Use local data directory
    Dtparm = parser.add_argument_group(
        title="Local Data Settings",
        description="Settings associated with defining and using a " +
        "local data base of pre-downloaded day-long SAC files.")
    Dtparm.add_argument(
        "--local-data",
        action="store",
        type=str,
        dest="localdata",
        default="",
        help="Specify a comma separated list of paths containing " +
        "day-long sac files of data already downloaded. If data exists " +
        "for a seismogram is already present on disk, it is selected " +
        "preferentially over downloading the data using the Client interface")
    Dtparm.add_argument(
        "--no-data-zero",
        action="store_true",
        dest="ndval",
        default=False,
        help="Specify to force missing data to be set as zero, rather " +
        "than default behaviour. [Default sets to nan]")
    Dtparm.add_argument(
        "--no-local-net",
        action="store_false",
        dest="useNet",
        default=True,
        help="Specify to prevent using the Network code in the search " +
        "for local data (sometimes for CN stations the dictionary name " +
        "for a station may disagree with that in the filename. " +
        "[Default Network used]")

    # Server Settings
    Svparm = parser.add_argument_group(
        title="Server Settings",
        description="Settings associated with which datacenter to log into.")
    Svparm.add_argument(
        "--catalogue-source",
        action="store",
        type=str,
        dest="cat_client",
        default="IRIS",
        help="Specify the server to connect to for the event catalogue. " +
        "Options include: BGR, ETH, GEONET, GFZ, INGV, IPGP, IRIS, KOERI, " +
        "LMU, NCEDC, NEIP, NERIES, ODC, ORFEUS, RESIF, SCEDC, USGS, USP. " +
        "[Default IRIS]")
    Svparm.add_argument(
        "--waveform-source",
        action="store",
        type=str,
        dest="wf_client",
        default="IRIS",
        help="Specify the server to connect to for the waveform data. " +
        "Options include: BGR, ETH, GEONET, GFZ, INGV, IPGP, IRIS, KOERI, " +
        "LMU, NCEDC, NEIP, NERIES, ODC, ORFEUS, RESIF, SCEDC, USGS, USP. " +
        "[Default IRIS]")
    Svparm.add_argument(
        "-U",
        "--User-Auth",
        action="store",
        type=str,
        dest="UserAuth",
        default="",
        help="Enter your Authentification Username and Password for the " +
        "waveform server (--User-Auth='username:authpassword') to access " +
        "and download restricted data. [Default no user and password]")

    # Station Selection Parameters
    stparm = parser.add_argument_group(
        title="Station Selection Parameters",
        description="Parameters to select a specific station.")
    stparm.add_argument(
        "--keys",
        dest="stkeys",
        type=str,
        default="",
        help="Specify list of Station Keys in the database to process.")
    stparm.add_argument(
        "-c", "--coord-system",
        dest="nameconv",
        type=int,
        default=2,
        help="Coordinate system specification of instrument. " +
        "(0) Attempt Autodetect between 1 and 2; (1) HZ, HN, HE; " +
        "(2) Left Handed: HZ, H2 90 CW H1; (3) Right Handed: HZ, H2 90 CCW " +
        "H1. [Default 2]")

    #-- Timing
    Tmparm = parser.add_argument_group(
        title="Timing Parameters",
        description="Parameters associated with event timing and window " +
        "length.")
    Tmparm.add_argument(
        "--start",
        dest="startT",
        type=str,
        default="",
        help="Enter Start date for event catalogue search. Note, more " +
        "recent of this value or station start date will be used.")
    Tmparm.add_argument(
        "--end",
        dest="endT",
        type=str,
        default="",
        help="Enter End date for event catalogue search. Note, less " +
        "recent of this or the station end date will be used.")
    Tmparm.add_argument(
        "--window",
        dest="wlen",
        type=float,
        default=15.,
        help="Enter length of time window following P arrival time in "+
        "seconds. [Default 15.]")
    Tmparm.add_argument(
        "--times",
        dest="tt",
        type=str,
        default=None,
        help="Enter window start and end times relative to predicted P "+
        "arrival time in seconds. Negative values imply start of window "+
        "before P wave arrival. [Default -2., 5.]")

    # EQ Specifications
    Eqparm = parser.add_argument_group(
        title="Earthquake Selection Criteria",
        description="Parameters associated with selecing the subset of " +
        "earthquakes to use in calculations.")
    Eqparm.add_argument(
        "--min-mag",
        dest="minmag",
        type=float,
        default=5.5,
        help="Specify the minimum magnitude of Earthquakes to use in " +
        "the catalogue search. [Default 5.5]")
    Eqparm.add_argument(
        "--max-mag",
        dest="maxmag",
        type=float,
        default=9.,
        help="Specify the maximum magnitude of Earthquakes to use in " +
        "the catalogue search. [Default 9.]")
    Eqparm.add_argument(
        "--min-dist",
        dest="mindist",
        type=float,
        default=5.,
        help="Specify the minimum earthquake distance (in degrees). " +
        "[Default 5.]")
    Eqparm.add_argument(
        "--max-dist",
        dest="maxdist",
        type=float,
        default=175.,
        help="Specify the maximum earthquake distance (in degrees). " +
        "[Default 175.]")
    Eqparm.add_argument(
        "--max-dep",
        dest="maxdep",
        type=float,
        default=1000.,
        help="Specify maximum Earthquake Depth (km). [Default no limit]")
    Eqparm.add_argument(
        "--discard-catalogue",
        dest="savecat",
        default=True,
        action="store_false",
        help="Specify to discard the eq catalogue after processing.")

    # Processing Specifications
    Procparm = parser.add_argument_group(
        title="Processing Parameters",
        description="Parameters associated with BNG processing.")
    Procparm.add_argument(
        "--new-sampling-rate",
        dest="new_sr",
        type=float,
        default=None,
        help="Specify new sampling rate in Hz. [Default no resampling]")
    Procparm.add_argument(
        "--dphi",
        dest="dphi",
        type=float,
        default=0.1,
        help="Specify angle interval for search, in degrees. [Default 0.1]")
    Procparm.add_argument(
        "--bp",
        dest="bp",
        type=str,
        default=None,
        help="Specify corner frequencies in Hz as a list of two floats. "+
        "[Default 0.7,5.0]")
    Procparm.add_argument(
        "--plot",
        dest="showplot",
        default=False,
        action="store_true",
        help="Show processing step including raw and rotated waveforms. "+
        "[Default doesn't show plot]")

    # Parse Arguments
    args = parser.parse_args(argv)

    # Check inputs
    if not exist(args.indb):
        parser.error("Input file " + args.indb + " does not exist")

    # create station key list
    if len(args.stkeys) > 0:
        args.stkeys = args.stkeys.split(',')

    # construct start time
    if len(args.startT) > 0:
        try:
            args.startT = UTCDateTime(args.startT)
        # Fix: narrow the bare except so SystemExit/KeyboardInterrupt
        # are not swallowed.
        except Exception:
            parser.error(
                "Cannot construct UTCDateTime from start time: " + args.startT)
    else:
        args.startT = None

    # construct end time
    if len(args.endT) > 0:
        try:
            args.endT = UTCDateTime(args.endT)
        # Fix: narrow the bare except (see start-time handling above).
        except Exception:
            parser.error(
                "Cannot construct UTCDateTime from end time: " + args.endT)
    else:
        args.endT = None

    # Parse User Authentification
    if not len(args.UserAuth) == 0:
        tt = args.UserAuth.split(':')
        if not len(tt) == 2:
            parser.error(
                "Error: Incorrect Username and Password Strings " +
                "for User Authentification")
        else:
            args.UserAuth = tt
    else:
        args.UserAuth = []

    # Parse Local Data directories
    if len(args.localdata) > 0:
        args.localdata = args.localdata.split(',')
    else:
        args.localdata = []

    # Check NoData Value
    if args.ndval:
        args.ndval = 0.0
    else:
        args.ndval = nan

    if args.bp is not None:
        args.bp = [float(val) for val in args.bp.split(',')]
        args.bp = sorted(args.bp)
        if (len(args.bp)) != 2:
            parser.error(
                "Error: --bp should contain 2 " +
                "comma-separated floats")

    if args.tt is not None:
        args.tt = [float(val) for val in args.tt.split(',')]
        args.tt = sorted(args.tt)
        if (len(args.tt)) != 2:
            parser.error(
                "Error: --times should contain 2 " +
                "comma-separated floats")
    else:
        args.tt = [-2., 5.]

    return args
Пример #39
0
def main():
    """Command-line entry point: build the parser and dispatch an action."""
    # Describe optional capabilities (image output, GUI) in the epilog.
    notes = []
    if ImageWriter is None:
        notes.append(
            'Image output disabled (Pillow not found), --type option disabled.'
        )
    else:
        notes.append('Image output enabled, use --type option to give image '
                     'format (png, jpeg, ...).')
    if QtCore is None:
        notes.append('PyQt not found, gui action disabled.')
    else:
        notes.append('PyQt found. Use gui action to get a simple GUI.')

    parser = ArgumentParser(description='Create standard barcodes via cli.',
                            epilog=' '.join(notes))
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + version)

    subparsers = parser.add_subparsers(title='Actions')

    create_parser = subparsers.add_parser(
        'create', help='Create a barcode with the given options.')
    create_parser.add_argument('code', help='Code to render as barcode.')
    create_parser.add_argument(
        'output',
        help='Filename for output without extension, e. g. mybarcode.')
    create_parser.add_argument(
        '-c', '--compress', action='store_true',
        help='Compress output, only recognized if type is svg.')
    create_parser.add_argument(
        '-b', '--barcode', help='Barcode to use [default: %(default)s].')
    create_parser.add_argument(
        '--text', help='Text to show under the barcode.')
    if ImageWriter is not None:
        # Image --type only makes sense when Pillow is available.
        create_parser.add_argument(
            '-t', '--type', help='Type of output [default: %(default)s].')

    list_parser = subparsers.add_parser(
        'list', help='List available image and code types.')
    list_parser.set_defaults(func=list_types)

    if QtCore is not None:
        gui_parser = subparsers.add_parser(
            'gui', help='Opens a simple PyQt GUI to create barcodes.')
        gui_parser.set_defaults(func=open_gui)

    create_parser.set_defaults(type='svg', compress=False, func=create_barcode,
                               barcode='code39', text=None)

    args = parser.parse_args()
    # When no sub-command was given, the namespace carries no 'func'.
    action = getattr(args, 'func', None)
    if action is None:
        parser.error("You need to tell me what to do.")
    action(args, parser)
Пример #40
0
def main():
    """Convert a Tiled JSON map into C source for the Castaway engine.

    Parses the command line, validates the preferred background/foreground
    colors, reads the map file, then prints C arrays for the room data,
    the tile set, the tile mapping and the per-room object lists to
    stdout.
    """
    parser = ArgumentParser(
        description="Map importer for Castaway",
        epilog="Copyright (C) 2015 Juan J Martinez <*****@*****.**>",
    )

    parser.add_argument("--version",
                        action="version",
                        version="%(prog)s " + __version__)
    parser.add_argument("--room-width",
                        dest="rw",
                        default=DEF_ROOM_WIDTH,
                        type=int,
                        help="room width (default: %s)" % DEF_ROOM_WIDTH)
    parser.add_argument("--room-height",
                        dest="rh",
                        default=DEF_ROOM_HEIGHT,
                        type=int,
                        help="room height (default: %s)" % DEF_ROOM_HEIGHT)
    parser.add_argument("--ucl",
                        dest="ucl",
                        action="store_true",
                        help="UCL compress (requires ucl binary in the path)")
    parser.add_argument("-b",
                        "--base",
                        dest="base",
                        default=BASE,
                        type=int,
                        help="base character for the tiles (default: %d)" %
                        BASE)
    parser.add_argument("--preferred-bg",
                        dest="bg_color",
                        type=str,
                        default="black",
                        help="preferred background color (eg, black)")
    parser.add_argument("--preferred-fg",
                        dest="fg_color",
                        type=str,
                        default="white",
                        # FIX: help text previously said "fireground"
                        help="preferred foreground color (eg, white)")
    parser.add_argument("--sprite-limit",
                        dest="sprite_limit",
                        default=0,
                        type=int,
                        help="sprite limit per room (default: none)")
    parser.add_argument("--list-colors",
                        dest="list_colors",
                        action="store_true",
                        help="list color names (for --preferred-bg option)")
    parser.add_argument("map_json", help="Map to import")
    parser.add_argument("id", help="variable name")

    args = parser.parse_args()

    # --list-colors is informational only: print the palette and bail out.
    if args.list_colors:
        print("Color list: %s" % ', '.join(COLOR_NAMES))
        return

    # Resolve the preferred color names (case-insensitive) to palette
    # entries; reject unknown names early with a parser error.
    bg_color = None
    if args.bg_color:
        if args.bg_color.lower() not in COLOR_NAMES:
            parser.error("invalid color name %r" % args.bg_color)
        else:
            bg_color = COLORS[COLOR_NAMES.index(args.bg_color.lower())]

    fg_color = None
    if args.fg_color:
        if args.fg_color.lower() not in COLOR_NAMES:
            parser.error("invalid color name %r" % args.fg_color)
        else:
            fg_color = COLORS[COLOR_NAMES.index(args.fg_color.lower())]

    with open(args.map_json, "rt") as fd:
        data = json.load(fd)

    out = process_map(args.rw, args.rh, data)

    if args.ucl:
        # Pipe each room block through the external "ucl" compressor.
        compressed = []
        for block in out:
            p = subprocess.Popen(["ucl"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE)
            output, err = p.communicate(bytearray(block))
            # FIX: ord(chr(b)) was an identity round-trip — iterating
            # bytes on Python 3 already yields ints, so list() is enough.
            compressed.append(list(output))
        out = compressed

    print("/* imported from %s */" % args.map_json)
    print("#define ROOM_WIDTH %d" % args.rw)
    print("#define ROOM_HEIGHT %d" % args.rh)
    # rw * rh // 2: presumably two 4-bit tiles packed per byte — confirm
    # against process_map's output format.
    print("#define ROOM_SIZE %d" % (args.rw * args.rh // 2))
    print("#define MAP_WIDTH %d" % (data["width"] // args.rw))

    # Emit one "const unsigned char <id>_<i>[...]" array per room block,
    # wrapping the initializer every rw // 2 bytes (one map row).
    data_out = ""
    for i, block in enumerate(out):
        data_out_part = ""
        for part in range(0, len(block), args.rw // 2):
            if data_out_part:
                data_out_part += ",\n"
            data_out_part += ', '.join(
                ["0x%02x" % b for b in block[part:part + args.rw // 2]])
        data_out += "const unsigned char %s_%d[%d] = {\n" % (args.id, i,
                                                             len(block))
        data_out += data_out_part + "\n};\n"

    # Index table pointing at every per-room array.
    data_out += "const unsigned char *%s[%d] = { " % (args.id, len(out))
    data_out += ', '.join(["%s_%d" % (args.id, i) for i in range(len(out))])
    data_out += " };\n"
    print(data_out)

    total_maps = len(out)

    out, out_tile_mapping = process_tiles(data, bg_color, fg_color, args.base)

    # Tile bitmap data, 8 bytes per line.
    print("/* map tiles from %s */" %
          find_name(data["tilesets"], "default")["image"])
    print("#define %s_TILE_BASE %d" % (args.id.upper(), args.base))
    print("#define %s_TILE_LEN %d" % (args.id.upper(), len(out) // 8))
    print("const unsigned char %s_tiles[%d] = {" % (args.id, len(out)))
    data_out = ""
    for part in range(0, len(out), 8):
        if data_out:
            data_out += ",\n"
        data_out += ', '.join(["0x%02x" % b for b in out[part:part + 8]])
    print("%s\n};\n" % data_out)

    print("const unsigned char %s_tile_mapping[%d] = {" %
          (args.id, len(out_tile_mapping)))
    data_out = ""
    for part in range(0, len(out_tile_mapping), 8):
        if data_out:
            data_out += ",\n"
        data_out += ', '.join(
            ["0x%02x" % b for b in out_tile_mapping[part:part + 8]])
    print("%s\n};\n" % data_out)

    out = process_objects(args.rw, args.rh, data, args.sprite_limit)

    # Per-room object lists; rooms without objects get a NULL ("0") slot
    # in the index table below.
    for map_id, block in out.items():
        print("const uchar %s_objects%d[%d] = { %s };" %
              (args.id, map_id, len(block), ", ".join(map(str, block))))

    ob_map_list = []
    for m in range(total_maps):
        if m in out:
            ob_map_list.append("%s_objects%d" % (args.id, m))
        else:
            ob_map_list.append("0")
    print("const uchar *%s_objects[%d] = { %s };" %
          (args.id, total_maps, ", ".join(ob_map_list)))

    print("#define TOTAL_ROOMS %d\n" % total_maps)
Пример #41
0
def main():
    """genPOI entry point: collect POI markers and write the marker JS files.

    Parses the command line, loads the Overviewer configuration, builds a
    set of (filter, regionset) pairs for every render, optionally scans
    entities and player data, applies manual POIs, and finally writes
    markersDB.js, markers.js and baseMarkers.js into the configured
    output directory. Returns 1 on configuration errors.
    """
    # Present a sensible program name whether we were launched as
    # genPOI.py directly or via the main tool's --genpoi flag.
    if os.path.basename(sys.argv[0]) == "genPOI.py":
        prog_name = "genPOI.py"
    else:
        prog_name = sys.argv[0] + " --genpoi"
    logger.configure()

    parser = ArgumentParser(prog=prog_name)
    parser.add_argument("-c",
                        "--config",
                        dest="config",
                        action="store",
                        required=True,
                        help="Specify the config file to use.")
    parser.add_argument(
        "-p",
        "--processes",
        dest="procs",
        action="store",
        type=int,
        help="The number of local worker processes to spawn. Defaults to the "
        "number of CPU cores your computer has.")
    parser.add_argument("-q",
                        "--quiet",
                        dest="quiet",
                        action="count",
                        help="Reduce logging output")
    parser.add_argument("--skip-scan",
                        dest="skipscan",
                        action="store_true",
                        help="Skip scanning for entities when using GenPOI")
    parser.add_argument("--skip-players",
                        dest="skipplayers",
                        action="store_true",
                        help="Skip getting player data when using GenPOI")

    args = parser.parse_args()

    # -q raises the log threshold to WARN (quiet is None when not given).
    if args.quiet and args.quiet > 0:
        logger.configure(logging.WARN, False)

    # Parse the config file
    mw_parser = config_parser.MultiWorldParser()
    try:
        mw_parser.parse(args.config)
    except config_parser.MissingConfigException:
        parser.error("The configuration file '{}' does not exist.".format(
            args.config))
    if args.procs:
        mw_parser.set_config_item("processes", args.procs)
    try:
        config = mw_parser.get_validated_config()
    except Exception:
        logging.exception(
            "An error was encountered with your configuration. See the info below."
        )
        return 1

    destdir = config['outputdir']
    # saves us from creating the same World object over and over again
    worldcache = {}

    # filters: set of (internal_name, display_name, filterFunction,
    # regionset, worldpath, render_name) tuples.
    # marker_groups: render name -> list of menu entries for its filters.
    filters = set()
    marker_groups = defaultdict(list)

    logging.info("Searching renders: %s", list(config['renders']))

    # collect all filters and get regionsets
    for rname, render in config['renders'].items():
        # Convert render['world'] to the world path, and store the original
        # in render['worldname_orig']
        try:
            worldpath = config['worlds'][render['world']]
        except KeyError:
            logging.error(
                "Render %s's world is '%s', but I could not find a corresponding entry "
                "in the worlds dictionary.", rname, render['world'])
            return 1
        render['worldname_orig'] = render['world']
        render['world'] = worldpath

        # find or create the world object
        if (render['world'] not in worldcache):
            w = world.World(render['world'])
            worldcache[render['world']] = w
        else:
            w = worldcache[render['world']]

        # get the regionset for this dimension
        rset = w.get_regionset(render['dimension'][1])
        if rset is None:  # indicates no such dimension was found:
            logging.warning(
                "Sorry, you requested dimension '%s' for the render '%s', but I "
                "couldn't find it.", render['dimension'][0], rname)
            continue
        # List of regionsets that should be handled
        rsets = []
        if "crop" in render:
            for zone in render['crop']:
                rsets.append(world.CroppedRegionSet(rset, *zone))
        else:
            rsets.append(rset)

        # find filters for this render
        for f in render['markers']:
            # internal identifier for this filter
            # NOTE(review): str hash() is salted per process
            # (PYTHONHASHSEED), so these identifiers are presumably only
            # stable within a single run — confirm nothing persists them
            # across runs.
            name = (replaceBads(f['name']) +
                    hex(hash(f['filterFunction']))[-4:] + "_" +
                    hex(hash(rname))[-4:])

            # add it to the list of filters
            for rset in rsets:
                filters.add((name, f['name'], f['filterFunction'], rset,
                             worldpath, rname))

            # add an entry in the menu to show markers found by this filter
            group = dict(groupName=name,
                         displayName=f['name'],
                         icon=f.get('icon', 'signpost_icon.png'),
                         createInfoWindow=f.get('createInfoWindow', True),
                         checked=f.get('checked', False),
                         showIconInLegend=f.get('showIconInLegend', False))
            marker_groups[rname].append(group)

    # initialize the structure for the markers
    markers = dict((name, dict(created=False, raw=[], name=filter_name))
                   for name, filter_name, __, __, __, __ in filters)

    # One entry per distinct regionset referenced by any filter.
    all_rsets = set(map(lambda f: f[3], filters))
    logging.info("Will search %s region sets using %s filters", len(all_rsets),
                 len(filters))

    # apply filters to regionsets
    if not args.skipscan:
        for rset in all_rsets:
            rset_filters = list(filter(lambda f: f[3] == rset, filters))
            logging.info("Calling handleEntities for %s with %s filters", rset,
                         len(rset_filters))
            handleEntities(rset, config, args.config, rset_filters, markers)

    # apply filters to players
    if not args.skipplayers:
        PlayerDict.load_cache(destdir)

        # group filters by worldpath, so we only search for players once per
        # world
        def keyfunc(x):
            # x[4] is the worldpath element of the filter tuple
            return x[4]

        sfilters = sorted(filters, key=keyfunc)
        for worldpath, worldpath_filters in itertools.groupby(
                sfilters, keyfunc):
            handlePlayers(worldpath, list(worldpath_filters), markers)

    # add manual POIs
    # group filters by name of the render, because only filter functions for
    # the current render should be used on the current render's manualpois
    def keyfunc(x):
        # x[5] is the render-name element of the filter tuple
        return x[5]

    sfilters = sorted(filters, key=keyfunc)
    for rname, rname_filters in itertools.groupby(sfilters, keyfunc):
        manualpois = config['renders'][rname]['manualpois']
        handleManual(manualpois, list(rname_filters), markers)

    logging.info("Done handling POIs")
    logging.info("Writing out javascript files")

    if not args.skipplayers:
        PlayerDict.save_cache(destdir)

    # Each output file is a plain JS assignment so the web front-end can
    # load it with a <script> include.
    with open(os.path.join(destdir, "markersDB.js"), "w") as output:
        output.write("var markersDB=")
        json.dump(markers, output, sort_keys=True, indent=2)
        output.write(";\n")
    with open(os.path.join(destdir, "markers.js"), "w") as output:
        output.write("var markers=")
        json.dump(marker_groups, output, sort_keys=True, indent=2)
        output.write(";\n")
    with open(os.path.join(destdir, "baseMarkers.js"), "w") as output:
        output.write("overviewer.util.injectMarkerScript('markersDB.js');\n")
        output.write("overviewer.util.injectMarkerScript('markers.js');\n")
        output.write("overviewer.util.injectMarkerScript('regions.js');\n")
        output.write("overviewer.collections.haveSigns=true;\n")
    logging.info("Done")
Пример #42
0
def parse_options():
    """Parse command-line options for the SAP Router fingerprinting example.

    Builds the argument parser (target, database and misc option groups)
    and returns the parsed options namespace.
    """
    description = (
        "This example script connects with a SAP Router service and tries to "
        "determine its version. Finger printing is performed by triggering "
        "different errors and looking at the lines where the error is produced."
    )
    usage = "%(prog)s -d <remote host> [options]"

    arg_parser = ArgumentParser(usage=usage, description=description,
                                epilog=pysap.epilog)

    target_group = arg_parser.add_argument_group("Target")
    target_group.add_argument("-d", "--remote-host", dest="remote_host",
                              default="127.0.0.1",
                              help="Remote host [%(default)s]")
    target_group.add_argument("-p", "--remote-port", dest="remote_port",
                              type=int, default=3299,
                              help="Remote port [%(default)d]")

    db_group = arg_parser.add_argument_group("Database options")
    db_group.add_argument("-f", "--fingerprints-file", dest="fingerprints",
                          metavar="FILE", default="router_fingerprints.json",
                          help="Fingerprints file to use [%(default)s]")
    db_group.add_argument("-a", "--add-fingerprint", dest="add_fingerprint",
                          action="store_true",
                          help="New fingerprint to add to the database in json format")
    db_group.add_argument("-i", "--version-information", dest="version_info",
                          help="Version information to use when adding new entries in json format")
    db_group.add_argument("-n", "--new-entries", dest="new_entries",
                          action="store_true",
                          help="Generate new database entries even when the fingerprints matched")
    db_group.add_argument("--new-fingerprints-file",
                          dest="new_fingerprint_file", metavar="FILE",
                          default="saprouter_new_fingerprints.json",
                          help="File to write or load from new fingerprints")

    misc_group = arg_parser.add_argument_group("Misc options")
    misc_group.add_argument("-v", "--verbose", dest="verbose",
                            action="store_true", help="Verbose output")

    options = arg_parser.parse_args()

    # Defensive check kept from the original; a default host is set, so
    # this only fires if someone passes an explicit empty value.
    if not options.remote_host:
        arg_parser.error("Remote host is required")

    return options
Пример #43
0
def main(args=None):
    """CLI entry point for the WACZ creator.

    Builds the argument parser with ``create`` and ``validate``
    sub-commands, validates option combinations, and dispatches to the
    handler registered for the chosen sub-command.
    """
    parser = ArgumentParser(description="WACZ creator",
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument("-V", "--version", action="version",
                        version=get_version())

    subparsers = parser.add_subparsers(dest="cmd")
    subparsers.required = True

    create_cmd = subparsers.add_parser("create", help="create wacz file")
    create_cmd.add_argument("inputs", nargs="+")
    create_cmd.add_argument("-f", "--file", action="store_true")
    create_cmd.add_argument("-o", "--output", default="archive.wacz")
    create_cmd.add_argument(
        "-t", "--text", action="store_true",
        help="Generates pages.jsonl with a full-text index. Must be run in addition with --detect-pages or it will have no effect")
    create_cmd.add_argument(
        "-p", "--pages", action="store",
        help="Overrides the pages generation with the passed jsonl pages")
    create_cmd.add_argument(
        "--detect-pages", action="store_true",
        help="Generates pages.jsonl without a text index")
    create_cmd.add_argument(
        "--hash-type", choices=["sha256", "md5"],
        help="Allows the user to specify the hash type used. Currently we allow sha256 and md5")
    # Optional metadata stored in the generated archive.
    for extra in ("--ts", "--url", "--date", "--title", "--desc"):
        create_cmd.add_argument(extra)
    create_cmd.set_defaults(func=create_wacz)

    validate_cmd = subparsers.add_parser("validate",
                                         help="validate a wacz file")
    validate_cmd.add_argument("-f", "--file", required=True)
    validate_cmd.set_defaults(func=validate_wacz)

    ns = parser.parse_args(args=args)

    # Cross-option validation only applies to the "create" sub-command.
    if ns.cmd == "create" and ns.ts is not None and ns.url is None:
        parser.error("--url must be specified when --ts is passed")

    if ns.cmd == "create" and ns.detect_pages is not False and ns.pages is not None:
        parser.error(
            "--pages and --detect-pages can't be set at the same time they cancel each other out."
        )

    return ns.func(ns)
Пример #44
0
def main():
    """Main routine: command-line tool to inspect and configure FTDI EEPROMs.

    Parses options, configures logging, optionally loads a virtual USB
    backend for testing, then opens the device EEPROM and applies the
    requested erase/set/dump/update operations. Exits with status 1 on
    error and 2 on keyboard interrupt.
    """
    debug = False
    try:
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('device',
                               nargs='?',
                               default='ftdi:///?',
                               help='serial port device name')
        argparser.add_argument('-x',
                               '--hexdump',
                               action='store_true',
                               help='dump EEPROM content as ASCII')
        argparser.add_argument('-X',
                               '--hexblock',
                               type=int,
                               help='dump EEPROM as indented hexa blocks')
        argparser.add_argument('-o',
                               '--output',
                               type=FileType('wt'),
                               help='output ini file to save EEPROM content')
        argparser.add_argument('-s',
                               '--serial-number',
                               help='set serial number')
        argparser.add_argument('-m',
                               '--manufacturer',
                               help='set manufacturer name')
        argparser.add_argument('-p', '--product', help='set product name')
        argparser.add_argument('-c',
                               '--config',
                               action='append',
                               help='change/configure a property '
                               'as key=value pair')
        argparser.add_argument('-e',
                               '--erase',
                               action='store_true',
                               help='erase the whole EEPROM content')
        argparser.add_argument('-u',
                               '--update',
                               action='store_true',
                               help='perform actual update, use w/ care')
        argparser.add_argument('-V',
                               '--virtual',
                               type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        # Defensive: 'device' has a default, so this only fires if an
        # explicit empty value is somehow passed.
        if not args.device:
            argparser.error('Serial device not specified')

        # Map the -v count to a log level, clamped to [DEBUG, ERROR].
        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            # Timestamped format for debugging; plain messages otherwise.
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        eeprom = FtdiEeprom()
        eeprom.open(args.device)
        if args.erase:
            eeprom.erase()
        if args.serial_number:
            eeprom.set_serial_number(args.serial_number)
        if args.manufacturer:
            eeprom.set_manufacturer_name(args.manufacturer)
        if args.product:
            eeprom.set_product_name(args.product)
        # Apply each -c key=value (or key:value) property change.
        for conf in args.config or []:
            # "-c ?" lists the supported property names and exits.
            if conf == '?':
                helpstr = ', '.join(eeprom.properties)
                print(
                    fill(helpstr, initial_indent='  ', subsequent_indent='  '))
                exit(1)
            for sep in ':=':
                if sep in conf:
                    name, value = conf.split(sep, 1)
                    if not value:
                        argparser.error('Configuration %s without value' %
                                        conf)
                    # set_property may emit help text into the stream
                    # (e.g. for enumerated values); if it does, show it
                    # and stop instead of continuing.
                    helpio = StringIO()
                    eeprom.set_property(name, value, helpio)
                    helpstr = helpio.getvalue()
                    if helpstr:
                        print(
                            fill(helpstr,
                                 initial_indent='  ',
                                 subsequent_indent='  '))
                        exit(1)
                    break
            else:
                # for/else: neither ':' nor '=' was found in this -c arg.
                argparser.error('Missing name:value separator in %s' % conf)
        if args.hexdump:
            print(hexdump(eeprom.data))
        if args.hexblock is not None:
            # Dump 16 bytes per line, indented by the requested amount.
            indent = ' ' * args.hexblock
            for pos in range(0, len(eeprom.data), 16):
                hexa = ' '.join(
                    ['%02x' % x for x in eeprom.data[pos:pos + 16]])
                print(indent, hexa, sep='')
        if args.update:
            # commit(False): presumably False means "not a dry run" —
            # TODO confirm against FtdiEeprom.commit's signature.
            eeprom.commit(False)
        if args.verbose > 0:
            eeprom.dump_config()
        if args.output:
            eeprom.save_config(args.output)

    except (ImportError, IOError, NotImplementedError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            # Full traceback only in debug mode.
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
Пример #45
0
        "--first-prefix",
        action="store_true",
        help="split on underscore in file name, use first prefix only",
    )
    parser.add_argument(
        "--pretrained-model-dir",
        type=str,
        default=default_pretrained,
        help=
        "path to pretrained model directory (default of None uses ./model)",
    )
    parser.add_argument("input", type=str, help="input audio file")

    args = parser.parse_args()
    if not os.path.exists(args.input):
        parser.error("Input file '%s' not found" % args.input)

    # Read audio data
    x, _ = librosa.load(args.input, sr=sample_rate, mono=True)

    x_h_out, x_p_out, x_v_out = xtract_mixin(
        x,
        instrumental=args.instrumental,
        single_model=args.single_model,
        pretrained_model_dir=args.pretrained_model_dir,
    )

    song_name = os.path.splitext(os.path.basename(args.input))[0]
    if args.first_prefix:
        song_name = song_name.split("_")[0]
Пример #46
0
def parameters():
    """Parse the kernel build script command line and return a config dict.

    Enforces device-specific constraints:
      * whyred does not support cpuquiet, overclock or custom builds
      * mido requires cpuquiet
      * beta/test/personal versions cannot be combined with --release

    Returns a dict with the build type, clean flags, device, and the
    remaining feature toggles.
    """
    param = ArgumentParser(description='Kernel Build Script.', )
    # --clean-only and --clean-and-build are mutually exclusive.
    group = param.add_mutually_exclusive_group()
    param.add_argument('-b',
                       '--build',
                       choices=['miui', 'custom'],
                       required=True)
    group.add_argument('--clean-only', dest='clean_only', action='store_true')
    group.add_argument('--clean-and-build',
                       dest='clean_and_build',
                       action='store_true')
    param.add_argument('-c', '--cpuquiet', action='store_true')
    param.add_argument('-d',
                       '--device',
                       choices=['mido', 'whyred'],
                       required=True)
    param.add_argument('-o', '--overclock', action='store_true')
    param.add_argument('-r', '--release', action='store_true')
    param.add_argument('-t', '--telegram', action='store_true')
    param.add_argument('-u', '--upload', action='store_true')
    param.add_argument('--verbose', action='store_true')
    param.add_argument('-v', '--version', required=True)
    param.add_argument('-cc', '--cc', choices=['clang', 'gcc'], required=True)
    params = vars(param.parse_args())
    build_type = params['build']
    clean_only = params['clean_only']
    clean_and_build = params['clean_and_build']
    cpuquiet = params['cpuquiet']
    device = params['device']
    oc = params['overclock']
    release = params['release']
    telegram = params['telegram']
    upload = params['upload']
    verbose = params['verbose']
    version = params['version']
    cc = params['cc']
    # Check whyred ENV
    if device == 'whyred':
        # cpuquiet, overclock and custom builds are unsupported on whyred.
        if any([cpuquiet, oc, build_type == 'custom']):
            print()
            param.error('[-c/--cpuquiet, -o/--overclock, -b/--build = custom],'
                        " isn't available for whyred")
    elif device == 'mido':
        if not cpuquiet:
            param.error('mido already drop support for non-cpuquiet')
    # Fail build if using version beta|test|personal while using --release.
    # BUG FIX: the original condition was
    # `version in ['beta' or 'test' or 'personal']`, which evaluates to
    # `version in ['beta']` and silently let 'test'/'personal' through.
    if version in ('beta', 'test', 'personal') and release:
        param.error('version beta|test|personal, '
                    "can't be passed with --release")
    return {
        'type': build_type,
        'clean': [clean_only, clean_and_build],
        'cpuquiet': cpuquiet,
        'device': device,
        'overclock': oc,
        'release': release,
        'telegram': telegram,
        'upload': upload,
        'verbose': verbose,
        'version': version,
        'cc': cc
    }
Пример #47
0
def main(args=None):
    """Command-line entry point for building fonts.

    Builds OTF/TTF/UFO/variable fonts from a Glyphs file, one or more UFOs,
    or a designspace document.  ``args`` may be a list of argument strings;
    when ``None``, argparse falls back to ``sys.argv[1:]``.  Dispatches to
    the matching ``FontProject.run_from_*`` method after validating
    source-specific option combinations.
    """
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version=__version__)
    # Exactly one input source kind must be provided.
    inputGroup = parser.add_argument_group(
        title='Input arguments',
        description='The following arguments are mutually exclusive.')
    xInputGroup = inputGroup.add_mutually_exclusive_group(required=True)
    xInputGroup.add_argument(
        '-g', '--glyphs-path', metavar='GLYPHS',
        help='Path to .glyphs source file')
    xInputGroup.add_argument(
        '-u', '--ufo-paths', nargs='+', metavar='UFO',
        help='One or more paths to UFO files')
    xInputGroup.add_argument(
        '-m', '--mm-designspace', metavar='DESIGNSPACE',
        help='Path to .designspace file')

    outputGroup = parser.add_argument_group(title='Output arguments')
    outputGroup.add_argument(
        '-o', '--output', nargs='+', default=('otf', 'ttf'), metavar="FORMAT",
        help='Output font formats. Choose between: %(choices)s. '
             'Default: otf, ttf',
        choices=('ufo', 'otf', 'ttf', 'ttf-interpolatable', 'variable'))
    # --output-path (single file) and --output-dir (per-format folders)
    # cannot be combined.
    outputSubGroup = outputGroup.add_mutually_exclusive_group()
    outputSubGroup.add_argument(
        '--output-path', default=None,
        help="Output font file path. Only valid when the output is a single "
        "file (e.g. input is a single UFO or output is variable font)")
    outputSubGroup.add_argument(
        '--output-dir', default=None,
        help="Output folder. By default, output folders are created in the "
        "current working directory, grouping output fonts by format.")
    # -i is tri-state: False (absent), True (bare flag), or a name/regex.
    outputGroup.add_argument(
        '-i', '--interpolate', nargs="?", default=False, const=True,
        metavar="INSTANCE_NAME",
        help='Interpolate masters and generate all the instances defined. '
             'To only interpolate a specific instance (or instances) that '
             'match a given "name" attribute, you can pass as argument '
             'the full instance name or a regular expression. '
             'E.g.: -i "Noto Sans Bold"; or -i ".* UI Condensed". '
             '(for Glyphs or MutatorMath sources only). ')
    outputGroup.add_argument(
        '-M', '--masters-as-instances', action='store_true',
        help='Output masters as instances')
    outputGroup.add_argument(
        '--family-name',
        help='Family name to use for masters, and to filter output instances')
    outputGroup.add_argument(
        '--round-instances', dest='round_instances', action='store_true',
        help='Apply integer rounding to all geometry when interpolating')
    outputGroup.add_argument(
        '--designspace-path', default=None,
        help='Path to output designspace file (for Glyphs sources only).')
    outputGroup.add_argument(
        '--master-dir', default=None,
        help='Directory where to write master UFO. Default: "./master_ufo". '
             'If value is "{tmp}", a temporary directory is created and '
             'removed at the end (for Glyphs sources only).')
    outputGroup.add_argument(
        '--instance-dir', default=None,
        help='Directory where to write instance UFOs. Default: '
             '"./instance_ufo". If value is "{tmp}", a temporary directory '
             'is created and removed at the end (for Glyphs sources only).')
    outputGroup.add_argument(
        '--validate-ufo', action='store_true',
        help='Enable ufoLib validation on reading/writing UFO files. It is '
             'disabled by default')

    contourGroup = parser.add_argument_group(title='Handling of contours')
    # store_false flags: "--keep-X" options disable the default behavior.
    contourGroup.add_argument(
        '--keep-overlaps', dest='remove_overlaps', action='store_false',
        help='Do not remove any overlap.')
    contourGroup.add_argument(
        '--overlaps-backend', dest='overlaps_backend', metavar="BACKEND",
        choices=("booleanOperations", "pathops"), default="booleanOperations",
        help='Select library to remove overlaps. Choose between: %(choices)s '
             '(default: %(default)s)')
    contourGroup.add_argument(
        '--keep-direction', dest='reverse_direction', action='store_false',
        help='Do not reverse contour direction when output is ttf or '
             'ttf-interpolatable')
    contourGroup.add_argument(
        '-e', '--conversion-error', type=float, default=None, metavar='ERROR',
        help='Maximum approximation error for cubic to quadratic conversion '
             'measured in EM')
    contourGroup.add_argument(
        '-a', '--autohint', nargs='?', const='',
        help='Run ttfautohint. Can provide arguments, quoted')
    contourGroup.add_argument(
        '--cff-round-tolerance', type=float, default=None, metavar='FLOAT',
        help='Restrict rounding of point coordinates in CFF table to only '
             'those floats whose absolute difference from their integral part '
             'is less than or equal to the tolerance. By default, all floats '
             'are rounded to integer (tolerance 0.5); 0 disables rounding.'
    )

    layoutGroup = parser.add_argument_group(title='Handling of OpenType Layout')
    layoutGroup.add_argument(
        '--interpolate-binary-layout', nargs="?", default=False, const=True,
        metavar="MASTER_DIR",
        help='Interpolate layout tables from compiled master binaries. '
             'Requires Glyphs or MutatorMath source.')
    layoutGroup.add_argument(
        "--feature-writer", metavar="CLASS", action="append",
        dest="feature_writer_specs",
        help="string specifying a feature writer class to load, either "
             "built-in or from an external module, optionally initialized with "
             "the given keyword arguments. The class and module names are "
             "separated by '::'. The option can be repeated multiple times "
             "for each writer class. A special value of 'None' will disable "
             "all automatic feature generation. The option overrides both the "
             "default ufo2ft writers and those specified in the UFO lib.")

    feaCompilerGroup = layoutGroup.add_mutually_exclusive_group(required=False)
    feaCompilerGroup.add_argument(
        '--use-afdko', action='store_true',
        help='Use makeOTF instead of feaLib to compile FEA.')
    feaCompilerGroup.add_argument(
        '--mti-source',
        help='Path to mtiLib .txt feature definitions (use instead of FEA)')

    glyphnamesGroup = parser.add_mutually_exclusive_group(required=False)
    glyphnamesGroup.add_argument(
        '--production-names', dest='use_production_names', action='store_true',
        help='Rename glyphs with production names if available otherwise use '
             'uninames.')
    glyphnamesGroup.add_argument(
        '--no-production-names', dest='use_production_names',
        action='store_false')

    subsetGroup = parser.add_mutually_exclusive_group(required=False)
    subsetGroup.add_argument(
        '--subset', dest='subset', action='store_true',
        help='Subset font using export flags set by glyphsLib')
    subsetGroup.add_argument(
        '--no-subset', dest='subset', action='store_false')

    subroutinizeGroup = parser.add_mutually_exclusive_group(required=False)
    subroutinizeGroup.add_argument(
        '-s', '--subroutinize', action='store_true',
        help='Optimize CFF table using compreffor (default)')
    subroutinizeGroup.add_argument(
        '-S', '--no-subroutinize', dest='subroutinize', action='store_false')

    # None means "not specified" so downstream code can distinguish the
    # tri-state on/off/default for these flags.
    parser.set_defaults(use_production_names=None, subset=None,
                        subroutinize=True)

    logGroup = parser.add_argument_group(title='Logging arguments')
    logGroup.add_argument(
        '--timing', action='store_true',
        help="Print the elapsed time for each steps")
    logGroup.add_argument(
        '--verbose', default='INFO', metavar='LEVEL',
        choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
        help='Configure the logger verbosity level. Choose between: '
             '%(choices)s. Default: INFO')
    args = vars(parser.parse_args(args))

    # Replace the raw "--feature-writer" spec strings with loaded writer
    # objects; a spec list of None means the option was never passed.
    specs = args.pop("feature_writer_specs")
    if specs is not None:
        args["feature_writers"] = _loadFeatureWriters(parser, specs)

    glyphs_path = args.pop('glyphs_path')
    ufo_paths = args.pop('ufo_paths')
    designspace_path = args.pop('mm_designspace')
    # Human-readable label of the detected source kind, used in error messages.
    input_format = ("Glyphs" if glyphs_path else
                    "designspace" if designspace_path else
                    "UFO") + " source"

    # Variable fonts need interpolation data, so only Glyphs or designspace
    # sources are valid, and instance-related options make no sense.
    if 'variable' in args['output']:
        if not (glyphs_path or designspace_path):
            parser.error(
                'Glyphs or designspace source required for variable font')
        exclude_args(parser, args,
                     ['interpolate', 'masters_as_instances',
                      'interpolate_binary_layout'],
                     "variable output")

    try:
        # Options consumed by the FontProject constructor are popped so the
        # remaining dict can be forwarded wholesale as **args.
        project = FontProject(timing=args.pop('timing'),
                              verbose=args.pop('verbose'),
                              validate_ufo=args.pop('validate_ufo'))

        if glyphs_path:
            with _make_tempdirs(parser, args):
                project.run_from_glyphs(glyphs_path, **args)
            return

        # Past this point the source is designspace or UFO: reject options
        # that only apply to Glyphs sources.
        exclude_args(parser, args,
                     ['family_name', 'mti_source', 'designspace_path',
                      'master_dir', 'instance_dir'],
                     input_format)
        if designspace_path:
            project.run_from_designspace(designspace_path, **args)
            return

        # Plain UFO input: interpolation-related options are not supported.
        exclude_args(parser, args,
                     ['interpolate', 'interpolate_binary_layout',
                      'round_instances'],
                    input_format)
        project.run_from_ufos(
            ufo_paths, is_instance=args.pop('masters_as_instances'), **args)
    except FontmakeError as e:
        # Present fontmake's domain errors as a clean CLI message instead of
        # a traceback.
        import sys
        sys.exit("fontmake: error: %s" % e)
Пример #48
0
# CLI: positional path to the PLS lexicon file plus a mandatory lexicon name.
cli.add_argument("path", type=str, metavar="FILE_PATH")
cli.add_argument("-n", "--name", type=str, required=True,
                 metavar="LEXICON_NAME", dest="name")
arguments = cli.parse_args()

# Create a client using the credentials and region defined in the adminuser
# section of the AWS credentials and configuration files
session = Session(profile_name="adminuser")
polly = session.client("polly")

# Open the PLS lexicon file for reading
try:
    with open(arguments.path, "r") as lexicon_file:
        # Read the pls file contents
        lexicon_data = lexicon_file.read()

        # Store the PLS lexicon on the service.
        # If a lexicon with that name already exists,
        # its contents will be updated
        response = polly.put_lexicon(Name=arguments.name,
                                      Content=lexicon_data)
except (IOError, BotoCoreError, ClientError) as error:
    # Could not open/read the file or the service returned an error,
    # exit gracefully
    cli.error(error)

print(u"The \"{0}\" lexicon is now available for use.".format(arguments.name))

              
# snippet-end:[polly.python.PutLexicon.complete]
  
Пример #49
0
def main():
    """Output randomly selected lines from FILE (a simplified ``shuf``).

    Lines come either from a file / stdin operand, or are synthesized from a
    numeric range given via ``-i``/``--input-range`` (e.g. "3-7").  Randomly
    chosen lines are written to stdout.  Relies on the module-level helpers
    ``stdin2list``, ``file2list`` and ``chooseline``.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-n",
        "--head-count",
        action="store",
        dest="headcount",
        default=0,
        help="output HEADCOUNT lines (default is the number of \
lines of the input)")
    parser.add_argument(
        "-r",
        "--repeat",
        action="store_true",
        dest="repeatlines",
        help="allows lines from the input to be repeated. Usage\
 without --head-count leads to infinite lines of output")
    parser.add_argument("-i",
                        "--input-range",
                        action="store",
                        dest="inputrange",
                        default="0",
                        help="selects which lines from the input will be used")

    options, args = parser.parse_known_args(sys.argv[1:])

    iLines = []
    # -i option: read lines from FILE/stdin, or synthesize them from a range.
    if options.inputrange == "0":
        try:
            if args[0] == "-":
                iLines = stdin2list(sys.stdin)
            else:
                iLines = file2list(args[0])
        except (IndexError, OSError):
            # No operand at all (or unreadable file): fall back to stdin.
            iLines = stdin2list(sys.stdin)
    else:
        if len(args) != 0:
            # BUG FIX: the original called str.format on args[0] itself,
            # producing a garbled message.
            parser.error("extra operand {0}".format(args[0]))
        irange = options.inputrange.split('-')
        # BUG FIX: the original read an undefined name ``input_range`` here,
        # which raised NameError for every -i invocation.
        try:
            r1 = int(irange[0])
            r2 = int(irange[1])
        except (IndexError, ValueError):
            parser.error("invalid input range {0}".format(options.inputrange))
        if r1 > r2:
            # BUG FIX: the format placeholder was missing, so the offending
            # range never appeared in the message.
            parser.error("invalid input range {0}".format(options.inputrange))
        # Synthesize one line per number in the inclusive range [r1, r2].
        iLines = [str(num) + "\n" for num in range(r1, r2 + 1)]

    # --head-count: 0 (the default) means "as many lines as the input has".
    headcount = int(options.headcount)
    explicit_count = headcount != 0
    if headcount == 0:
        headcount = len(iLines)
    # --repeat: with repetition more output lines than input lines is fine;
    # without it, cap the count at the number of available lines.
    repeat = bool(options.repeatlines)
    if headcount > len(iLines) and not repeat:
        headcount = len(iLines)

    # BUG FIX: the original never decremented its counter when --repeat was
    # set, looping forever even when an explicit --head-count was given.
    # Infinite output is only the documented behavior for --repeat WITHOUT
    # --head-count.
    emitted = 0
    while iLines and ((repeat and not explicit_count)
                      or emitted < headcount):
        currLine = chooseline(iLines)
        sys.stdout.write(str(currLine))
        emitted += 1
        if not repeat:
            # Without repetition each emitted line leaves the pool.
            iLines.remove(currLine)
Пример #50
0
\t\tcdsc -n [directory] [shortcut] links shortcut to directory
\t\tcdsc -u [shortcut] unlinks the shortcut''', formatter_class=RawTextHelpFormatter)
# CLI surface: an optional shortcut name plus three modes --
# -n <directory> links, -u unlinks, -l lists; no flag means "cd to shortcut".
parser.add_argument('shortcut', nargs='?', help='the string to be used as a shortcut to the directory')
parser.add_argument('-n', type=str, dest='directory', help='create a new link between the shortcut string and specified directory', metavar=("directory"))
parser.add_argument('-u', dest='unlink', action='store_true', help='unlink the specified shortcut')
parser.add_argument('-l', dest='list', action='store_true', help='lists currently linked shortcuts')


args = parser.parse_args()

if args.list:
	print(f'echo -e "{list_all()}"')
else:
	#raise error if shortcut not defined:
	if args.shortcut is None:
		parser.error("No shortcut defined")
	else:
		if args.directory is not None:
			# case where we are adding shortcut
			add_sc(args.shortcut, args.directory)
			print(f"echo 'Succesfully linked shortcut \"{args.shortcut}\".'")
		elif args.unlink:
			# case where we are unlinking
			if del_sc(args.shortcut):
				print(f"echo 'Succesfully unlinked shortcut \"{args.shortcut}\".'")
			else:
				print(f"echo 'Shortcut \"{args.shortcut}\" is not currently linked.'")
		elif args.list:
			print(f'echo -e "{list_all()}"')
		else:
			#case where we are cding to correct directory!
Пример #51
0
    cli.add_argument("--cardid", help="Last 4 digits of your card number")
    cli.add_argument("--output", "-o", help="Output path (QIF)")
    cli.add_argument("--qif-account",
                     help="Default QIF account name (e.g. Aktiva:VISA)")
    cli.add_argument("--from-date",
                     help="Export transactions as of... (DD.MM.YYYY)")
    cli.add_argument("--to-date",
                     help="Export transactions until... (DD.MM.YYYY)",
                     default=date.today().strftime('%d.%m.%Y'))
    cli.add_argument("--raw",
                     action="store_true",
                     help="Store the raw CSV file instead of QIF")

    args = cli.parse_args()
    if not args.userid:
        cli.error("Please specify a valid user id")
    if (not args.baid and not args.cardid) or (
            args.baid and args.cardid
    ):  # at least one must be given but both are not allowed
        cli.error("Please specify a valid bank account number _or_ card id")

    def is_valid_date(date):
        """Return truthy if *date* looks like DD.MM.YYYY (1-2 digit day and
        month, 2-5 digit year); falsy for empty/None or malformed input.

        NOTE: the parameter name shadows the module-level ``datetime.date``
        import, but only inside this helper.
        """
        # BUG FIX: use a raw string -- '\d' and '\Z' are invalid escape
        # sequences in a plain string on modern Python.  \Z anchors the
        # match at the end so trailing garbage is rejected.
        return date and bool(re.match(r'^\d{1,2}\.\d{1,2}\.\d{2,5}\Z', date))

    from_date = args.from_date
    while not is_valid_date(from_date):
        from_date = raw_input("Start time: ")
    if not is_valid_date(args.to_date):
        cli.error("Please specify a valid end time")
    if not args.output:
        cli.error("Please specify a valid output path")
Пример #52
0
def main():
    """Command-line entry point for computing semantic similarity (SS) scores.

    Parses the PySML CLI options, interactively confirms execution, validates
    the measure type / models / data, builds the list of concept or entity
    pairs to score, then runs the matching scorer (``InformationContent``,
    ``ConceptSimilarity`` or ``EntitySimilarity``) and reports results on
    screen or in a file.
    """
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-t",
        "--type",
        dest="mtype",
        type=str,
        help="Type of SS: ic (IC), cs (Term SS), es (Entity SS) ",
        required=True,
        metavar='str')
    parser.add_argument("-m",
                        "--models",
                        dest="models",
                        nargs='*',
                        type=str,
                        help="SS models to be considered ")
    parser.add_argument(
        "-p",
        "--parameter",
        dest="parameter",
        type=str,
        help="Other necessary parameters needed for the models considered",
        metavar='str')
    parser.add_argument(
        "-d",
        "--data",
        dest="data",
        type=str,
        nargs='+',
        help=
        "Full path to the file containing list of terms, term-term pairs, entity-entity ",
        metavar="str")
    parser.add_argument(
        "-a",
        "--annotationfile",
        dest="annot",
        type=str,
        help="Full path to the file appropriate Entity-term mapping ")
    parser.add_argument("-f",
                        "--ontologyfile",
                        dest="ontology",
                        type=str,
                        help="Full path to the ontology file")
    parser.add_argument("-n",
                        "--namespace",
                        dest="ontospace",
                        type=str,
                        help="The name space of the ontology being used ",
                        default='biological_process',
                        metavar="str")
    parser.add_argument("-o",
                        "--out",
                        dest="outfile",
                        type=str,
                        help="Naming the SS scores output file ",
                        default=os.getcwd(),
                        metavar="FILE")
    parser.add_argument("-s",
                        "--stream",
                        dest="stream",
                        type=int,
                        help="Output on (1) screen or (0) file",
                        default=1,
                        metavar='int')

    argss = parser.parse_args()

    # Providing system and platform requirements
    print('\n' + 74 * '*')
    print(('PySML v{}'.format(__version__) +
           ', developed by {}'.format(__author__)).center(71))
    print(('E-mail: {}'.format(__author_email__)).center(71))
    print(
        '\n' + 74 * '*' + '\n*' +
        'These are requirements for running Integrated Human PPI Generator tool'
        .center(71) + ' *\n' + 74 * '*')
    print(
        '\nPlatform: Python version >= 2.7.x\n\nOS      : Independent, but tested only on Linux (Ubuntu)'
    )
    print(
        "\nThe current PyPML library version retrieves semantic similarity scores\nof all known SS models:\n"
    )
    print(
        "For more information, please refer to the manual at:\n\t1. http://web.cbio.uct.ac.za/ITGOM/post-analysis-tools/pysml/PySML_Documentation.pdf\n\t2. http://web.cbio.uct.ac.za/ITGOM/post-analysis-tools/pysml/PKG-INFO\n\t3. https://github.com/gkm-software-dev/post-analysis-tools\n\nOr go to the PySML directory and type the following command line:"
    )
    print("\n\tpython setup.py --long-description")
    print("\nRequires: python networkx library")
    print(
        '\nOUTPUT  : SS scores are displayed on the screen or written to a file.\n'
    )
    print(('PySML is being run on {}, under'.format(
        datetime.datetime.now().strftime('%d-%m-%Y %H:%M'))).center(71))
    print(('{}'.format(__license__)).center(71))
    print(74 * '*')
    print('\nEnter 1 to continue and 2 to exit\n')

    # ``raw_input`` only exists on Python 2; use ``input`` on Python 3.
    if sys.version_info.major < 3: raw_pass = raw_input
    else: raw_pass = input
    while True:
        a = raw_pass('> ')
        try:
            a = int(a)
            if a in [1, 2]: break
            print('Please enter 1 to continue and 2 to exit')
        except ValueError:
            # Non-numeric input: prompt again.
            print('Please enter 1 to continue and 2 to exit')
    if a == 2:
        print(
            "\nExiting PySML. Thanks for using this tool! You can try again at any time.\n"
        )
        print(74 * '*' + '\n')
        sys.exit(2)

    # Quickly check whether the type of measure provided is valid
    if argss.mtype not in ['ic', 'cs', 'es']:
        print(
            '\nMeasure Type Error: There exist three following categories of SS measures-\n\n\tic: For concept information content (IC) scores, \n\tcs: For concept pairwise SS scores and  \n\tes: For entity pairwise SS scores.\n\nPlease refer to the tool manual, fix this issue and try again ...\n'
            + 74 * '*' + '\n')
        sys.exit(3)

    # Check whether python network library required for creating ontology DAG
    # exists.  BUG FIX: the deprecated ``imp`` module (removed in Python 3.12)
    # has been replaced by ``importlib.util.find_spec``.
    import importlib.util
    if importlib.util.find_spec('networkx') is None:
        print('\n' + 74 * '*' + '\n' +
              "python-networkx library under Python has not been found.")
        print(
            "install python-networkx and try again. Execution cannot be pursued, now exiting ...\n"
            + 74 * '*' + '\n')
        sys.exit(2)

    # Known model names per measure type, plus the default model tuple used
    # when -m/--models is omitted.
    allentsim = {
        'avg', 'bma', 'abm', 'bmm', 'hdf', 'vhdf', 'max', 'aln', 'intel',
        'spgk', 'lp', 'ye', 'simgic', 'simdic', 'simuic', 'simcou', 'simcot',
        'simui', 'simub', 'simdb', 'simnto', 'simcub', 'simctb', 'cho', 'ald',
        'kstats', 'nto', 'ub', 'db', 'ui'
    }
    # NOTE: duplicate entries ('shenoy', 'almubaid', 'rss') removed -- they
    # were harmless in a set but misleading to read.
    allconsim = {
        'resnik', 'lin', 'nunivers', 'wang', 'jiang', 'faith', 'aic', 'hrss',
        'ps', 'shen', 'wu', 'pekar', 'slimani', 'shenoy', 'wang_edge', 'zhong',
        'almubaid', 'rss', 'ssdd', 'rada', 'resnik_edge', 'leacock', 'li_edge'
    }
    allconic = {
        'universal', 'wang', 'zhang', 'seco', 'zho', 'seddiqui', 'zanchez',
        'meng', 'stats', 'vic', 'assdd'
    }
    measures = {
        'ic': (allconic, [('universal', )]),
        'cs': (allconsim, [('nunivers', 'universal')]),
        'es': (allentsim, [('bma', 'nunivers', 'universal')])
    }

    # Parse -m/--models specs of the form "model[:ic[:...]]" (':', ',' and
    # '-' all accepted as separators) into tuples.
    models = []
    if not argss.models:
        models = measures[argss.mtype][1]  # Considering no measure is provided
    elif isinstance(argss.models, list):
        for mod in argss.models:
            tline = re.split(r":|,|-", mod.strip())
            while '' in tline:
                tline.remove('')
            if len(tline) in [1, 2, 3
                              ] and tline[0] in measures[argss.mtype][0]:
                models.append(tuple(tline))
            else:
                parser.error(
                    "\n\nModel Error -- Model provided is not appropriate or do not match Model Type\nPlease read the library manual for more information\nargument -m/--models: expected an appropriate argument.\n"
                )
    else:
        parser.error(
            "\n\nModel structure format Error\nPlease read the library manual for more information\nargument -f/--ontologyfile: expected ontology file.\n"
        )

    if not models:
        parser.error(
            "\n\nModel Error -- Model format provided is not valid\nPlease read the library manual for more information\nargument -m/--models: expected an appropriate argument.\n"
        )

    # This means that the ontology must be provided
    Pairs = []
    Annots = {}
    if argss.data is None:
        if argss.mtype == 'cs':
            parser.error(
                "\n\nConcept or concept pair list : Value Error\nFor the type of measure chosen, a list of concepts or concept pairs should be provided\nargument -d/--data: expected list of concepts or concept pairs.\n"
            )
    elif len(argss.data) == 1 and argss.data[0].strip():
        try:  # It might be a file
            arg = os.path.abspath(argss.data[0])
            with open(arg) as fp:
                for line in fp:
                    tline = line.strip()
                    if not tline: continue
                    tline = [s.strip() for s in re.split(r"\s+|,|;", tline)]
                    while '' in tline:
                        tline.remove('')
                    if len(tline) == 2: Pairs.append(tuple(tline))
                    elif len(tline) == 1: Pairs.append(tline[0])
        except Exception:  # It might be a pair or a single concept or entity!
            # BUG FIX: the original referenced the undefined name ``mod``
            # here (NameError); the single -d value itself is what must be
            # parsed.
            tline = argss.data[0].strip().split(",")
            while '' in tline:
                tline.remove('')
            if len(tline) == 2: Pairs.append(tuple(tline))
            elif len(tline) == 1: Pairs.append(tline[0])
    else:  # This means that we are dealing with list or pairs of terms or entities
        for mod in argss.data:
            tline = mod.strip().split(",")
            while '' in tline:
                tline.remove('')
            if len(tline) == 2: Pairs.append(tuple(tline))
            elif len(tline) == 1: Pairs.append(tline[0])

    # Checking list of pairs!
    if argss.mtype == 'ic':
        if Pairs:
            if not all(isinstance(s, str) for s in Pairs):
                print()
                parser.error(
                    "\nConcept list : Value Error-List of concepts provided is not valid\nargument -d/--data: expected list of concepts or concept pairs.\n"
                )
    elif argss.mtype == 'cs':
        if not Pairs:  # For cs list of concepts or concept pairs is required
            parser.error(
                "\n\nConcept or concept pair list : Value Error\nA list of concepts or concept pairs provided is not valid\nargument -d/--data: expected list of concepts or concept pairs.\n"
            )
        else:
            if all(isinstance(s, str)
                   for s in Pairs):  # We are dealing with list of concepts
                Pairs = [(s, t) for s in Pairs for t in Pairs]
            elif all(isinstance(s, tuple) for s in Pairs
                     ):  # We are already dealing with list of concept pairs
                pass
            else:  # Value error- mixing concepts and concept pairs
                parser.error(
                    "\n\nConcept or concept pair list : Value Error\nA list of concepts or concept pairs provided is not valid\nargument -d/--data: expected list of concepts or concept pairs.\n"
                )
    elif argss.mtype == 'es':
        if argss.annot is None or (isinstance(argss.annot, str)
                                   and not argss.annot.strip()):
            parser.error(
                "\n\nAnnotation file: Value Error\nFor the type of measure chosen, an annotation file, entity-concept mapping should be provided\nargument -a/--annotationfile: expected annotation file.\n"
            )
        else:
            try:
                arg = os.path.abspath(argss.annot)
                with open(arg) as fp:
                    for line in fp:
                        tline = line.strip()
                        if not tline: continue
                        tline = [s.strip() for s in re.split(r"\s+", tline)]
                        if len(tline) == 2:
                            ttline = re.split(r";|,", tline[1])
                            while '' in ttline:
                                ttline.remove('')
                            Annots[tline[0]] = set(ttline)
            except Exception:
                # Fall back to treating -a as a dict-like literal.
                # NOTE: eval() on a CLI-supplied string is unsafe for
                # untrusted input; kept for backward compatibility.
                try:
                    Annots = eval(argss.annot)
                except Exception:
                    parser.error(
                        "\n\nAnnotation variable: Value Error\nFor the type of measure chosen, the annotation variable entity-concept\n\nmapping should be a dictionary-like string\nargument -a/--annotationfile: expected annotation file.\n"
                    )

        if not isinstance(Annots, dict) or not Annots:
            parser.error(
                "\n\nAnnotation variable: Value or Type Error\nFor the type of measure chosen, the annotation variable entity-concept mapping should be a no empty dictionary\nargument -a/--annotationfile: expected annotation file.\n"
            )
        if not Pairs: Pairs = [(p, q) for p in Annots for q in Annots]
        else:
            if all(isinstance(s, str)
                   for s in Pairs):  # We are dealing with list of concepts
                Pairs = [(p, q) for p in Pairs for q in Pairs]
            elif all(isinstance(s, tuple) for s in Pairs
                     ):  # We are already dealing with list of concept pairs
                pass
            else:  # Value error- mixing concepts and concept pairs
                parser.error(
                    "\n\nEntity (protein) or entity pair list : Value Error\nA list of entities or entity pairs provided is not valid\nargument -d/--data: expected list of entities or entity pairs.\n"
                )

    # Extra keyword parameters forwarded to the scorers, given as a
    # dict-like string.  NOTE: eval() is unsafe for untrusted input; kept
    # for backward compatibility.
    OtherPar = {}
    if argss.parameter:
        try:
            OtherPar = eval(argss.parameter)
        except Exception:
            parser.error(
                "\n\nOther measure parameter: Type Error\nOther parameters should be presented as string-like dictionary \nargument -p/--parameter: expected string-like dictionary.\n"
            )
    if not isinstance(OtherPar, dict):
        parser.error(
            "\n\nOther measure parameter: Type Error\nOther parameters should produce a dictionary \nargument -p/--parameter: expected string-like dictionary.\n"
        )

    # Now processing different scores
    print("\nThanks for choosing PySML. Start processing on %s" %
          str(time.asctime(time.localtime())))
    now = time.time()
    is_a = OtherPar.get('is_a')
    # BUG FIX: the original checked for the key 'part_of' but then read the
    # mismatched key 'part-of', raising KeyError whenever 'part_of' was given.
    part_of = OtherPar.get('part_of')
    if argss.ontology is None: argss.ontology = ''
    if argss.mtype == 'ic':
        simscore = InformationContent(ontofile=argss.ontology,
                                      namespace=argss.ontospace,
                                      is_a=is_a,
                                      part_of=part_of)
        # getIC(self, approach = None, TermList = None, **kwargs)
        simscore.getIC([s[0] for s in models], Pairs, **OtherPar)
        ScoreFile = 'InformationContentFile%d.txt' % (random.randint(
            0, 100000), )
    elif argss.mtype == 'cs':  #computeSim(self, TermPairs, models = None, **kwargs)
        simscore = ConceptSimilarity(ontofile=argss.ontology,
                                     namespace=argss.ontospace,
                                     is_a=is_a,
                                     part_of=part_of)
        simscore.computeSim(Pairs, models, **OtherPar)
        ScoreFile = 'ConceptSSFile%d.txt' % (random.randint(0, 100000), )
    elif argss.mtype == 'es':  #['GO:0000022', 'GO:0051231', 'GO:1903047', 'GO:0000278', 'GO:0007052', 'GO:0000023', 'GO:0005984'], [nunivers-zho resnik:zhang wang wang_edge lin,zanchez aic wu hrss jiang]
        simscore = EntitySimilarity(ontofile=argss.ontology,
                                    namespace=argss.ontospace,
                                    is_a=is_a,
                                    part_of=part_of)
        simscore.entitySim(Annots, Pairs, models, **OtherPar)
        ScoreFile = 'EntitySSFile%d.txt' % (random.randint(0, 100000), )

    # Finally, outputting the score on screen or written into a file!
    if argss.stream:
        print(simscore)
    else:  # Print in a file
        arg = '/'.join([argss.outfile, ScoreFile])
        try:
            # ``with`` guarantees the handle is closed even on write failure.
            with open(arg, 'w') as fw:
                fw.write(repr(simscore) + '\n')
            print("Scores are reported in the file: %s" % (arg, ))
        except OSError:
            parser.error(
                "\nWriting output error\nImpossible to write in %s\nargument -o/--out: Output error, scores cannot be written in the file.\n"
                % (arg, ))

    print("Processing accomplished on %s. Thanks!" %
          str(time.asctime(time.localtime())))
    tt = time.time() - now
    nh = tt // 3600
    rs = tt - nh * 3600
    nm = rs // 60
    rs -= nm * 60
    print("Total time elapsed is approximately %02d:%02d:%02d" % (nh, nm, rs))
    print(74 * '*' + '\n')
Пример #53
0
# Compare the digests of two Docker image tags and print the
# SOURCE_IMAGE / DESTINATION_IMAGE pair when they differ (output is
# consumed by a retag job).
import sys  # bug fix: sys.exit() below raised NameError without this import
from argparse import ArgumentParser
from docker_utils import get_digest_of_image

parser = ArgumentParser(description='')
parser.add_argument('-r',
                    dest='repository',
                    type=str,
                    help="Docker container repository e.g. cmssw/cms")
parser.add_argument('-s',
                    dest='source',
                    type=str,
                    help="Existing docker image tag e.g rhel6-itb")
parser.add_argument('-d',
                    dest='destination',
                    type=str,
                    help="New docker image tag e.g rhel6")
args = parser.parse_args()
if not (args.repository and args.source and args.destination):
    parser.error("Missing arguments.")

# get_digest_of_image appears to return a (found, digest) pair —
# TODO confirm against docker_utils.
src = get_digest_of_image(args.repository, args.source)
if not src[0]:
    print("ERROR: Unable to find source image %s:%s" %
          (args.repository, args.source))
    sys.exit(1)

des = get_digest_of_image(args.repository, args.destination)
# Only emit the retag pair when the destination digest differs (or the
# destination tag is missing).
if src[1] != des[1]:
    print("SOURCE_IMAGE=%s:%s" % (args.repository, args.source))
    print("DESTINATION_IMAGE=%s:%s" % (args.repository, args.destination))
Пример #54
0
def _run_command():
    """Command-line entry point for the airsd sharing tool.

    Builds the argument parser (``put``/``get``/``delete``/``configure``
    sub-commands), dispatches to the function of the same name in the
    ``airsd`` package, and prints the returned URL when there is one.
    Exits with status 1 on user interrupt or unhandled errors.
    """
    from os.path import dirname, realpath
    import sys

    # Make the parent package importable when run from a source checkout.
    sys.path.insert(0, dirname(dirname(realpath(__file__))))

    from argparse import ArgumentParser
    from argcomplete import autocomplete
    from airsd._config import (
        get_default_host,
        APP_NAME,
        get_config,
        DEFAULTSECT,
        COMPRESSIONS,
    )

    parser = ArgumentParser(
        prog=APP_NAME, description="Simple file sharing in the cloud."
    )
    sub_parsers = parser.add_subparsers(
        dest="parser_action", title="Commands", help="Commands", description=""
    )
    parser.add_argument(
        "--debug", action="store_true", help="If True, show the full error traceback."
    )

    config = get_config()
    default_host = get_default_host()

    # "put" sub-command: upload and return a shareable URL.
    description = "Put the file to share on the storage and return a shareable URL."
    action = sub_parsers.add_parser("put", help=description, description=description)
    action.add_argument("sources", nargs="*", help="The files or directory to share.")
    action.add_argument(
        "--archive",
        "-a",
        action="store_true",
        help="Create an archive before putting on the share. Automatic "
        "if SOURCES is a directory or multiples files.",
    )
    action.add_argument(
        "--expiry",
        "-e",
        default="24",
        help="The download expiry time. In hours, or with a "
        'specific time unit by appending: "s" for seconds, '
        '"m" for minutes, "h" for hours or "d" for days '
        '(For instance: "7d" for 7 days, "30m" for 30 minutes). '
        "Must be greater than zero.",
    )
    # --host is mandatory only when no default host is configured.
    action.add_argument(
        "--host",
        "-H",
        help="The remote storage host.",
        default=default_host,
        required=default_host is None,
    )
    action.add_argument(
        "--compression",
        "-c",
        help="The compression method to use when archiving.",
        default=config.get(DEFAULTSECT, "compression"),
        choices=COMPRESSIONS,
    )
    action.add_argument("--name", "-n", help="Rename the file being put.")

    # "get" sub-command: download a shared file.
    description = "Get shared file from an URL."
    action = sub_parsers.add_parser("get", help=description, description=description)
    action.add_argument("url", help="The URL.")
    action.add_argument("--output", "-o", help="Output file or directory.", default=".")
    action.add_argument(
        "--extract", "-e", help="Extract an archived file.", action="store_true"
    )

    # "delete" sub-command: remove shared files.
    description = "Delete a shared file."
    action = sub_parsers.add_parser("delete", help=description, description=description)
    action.add_argument("urls", nargs="*", help="Shareable URL of files to delete.")

    # "configure" sub-command: persist a configuration option.
    description = "Set a configuration option."
    action = sub_parsers.add_parser(
        "configure", help=description, description=description
    )
    action.add_argument("option", help="Name of option to set.")
    action.add_argument("value", help="Value of option to set.")
    action.add_argument("--host", help="Set option for the specified host.")

    autocomplete(parser)

    # Pop the meta options so the remaining dict maps 1:1 onto the target
    # function's keyword arguments.
    args = vars(parser.parse_args())
    debug = args.pop("debug")
    parser_action = args.pop("parser_action")
    if not parser_action:
        parser.error("An action is required")

    try:
        import airsd

        # Sub-command names match public functions in the airsd package.
        url = getattr(airsd, parser_action)(**args)
        if url:
            print(url)

    except KeyboardInterrupt:  # pragma: no cover
        parser.exit(status=1, message="Interrupted by user\n")

    except Exception as exception:
        if debug:
            raise
        # Bug fix: "\033[0m" resets terminal attributes; the previous
        # "\033[30m" forced a black foreground, leaving later terminal
        # output invisible on dark backgrounds.
        parser.exit(status=1, message=f"\033[31m{exception}\033[0m\n")
Пример #55
0
def main():
    """Parse recon-scan options and run the requested discovery phases.

    Each ``--flag`` enables one phase (ping sweep, hostname discovery,
    DNS discovery, service scan, SNMP walk); phases run in a fixed order
    against the ``-t`` target range, writing results under ``-o``.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-t",
        dest="target_hosts",
        required=True,
        help="Set a target range of addresses to target. Ex 10.11.1.1-255")
    parser.add_argument(
        "-o",
        dest="output_directory",
        required=True,
        help="Set the output directory. Ex /root/Documents/labs/")
    parser.add_argument(
        "-w",
        dest="wordlist",
        required=False,
        help=
        "Set the wordlist to use for generated commands. Ex /usr/share/wordlist.txt"
    )
    parser.add_argument(
        "--pingsweep",
        dest="ping_sweep",
        action="store_true",
        help=
        "Write a new target.txt by performing a ping sweep and discovering live hosts.",
        default=False)
    parser.add_argument("--dns",
                        dest="find_dns_servers",
                        action="store_true",
                        help="Find DNS servers from a list of targets.",
                        default=False)
    parser.add_argument("--services",
                        dest="perform_service_scan",
                        action="store_true",
                        help="Perform service scan over targets.",
                        default=False)
    parser.add_argument(
        "--hostnames",
        dest="hostname_scan",
        action="store_true",
        help=
        "Attempt to discover target hostnames and write to 0-name.txt and hostnames.txt.",
        default=False)
    parser.add_argument("--snmp",
                        dest="perform_snmp_walk",
                        action="store_true",
                        help="Perform service scan over targets.",
                        default=False)
    parser.add_argument(
        "--quick",
        dest="quick",
        action="store_true",
        required=False,
        help=
        "Move to the next target after performing a quick scan and writing first-round recommendations.",
        default=False)
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help=
        "Supress banner and headers to limit to comma dilimeted results only.",
        default=False)
    parser.add_argument(
        "--exec",
        dest="follow",
        action="store_true",
        help=
        "Execute shell comamnds from recommendations as they are discovered. Likely to lead to very long execute times depending on the wordlist being used.",
        default=False)
    parser.add_argument(
        "--simple_exec",
        dest="quickfollow",
        action="store_true",
        help=
        "Execute non-brute forcing shell comamnds only as they are discovered.",
        default=False)
    arguments = parser.parse_args()

    # NOTE(review): with -t/-o required, parse_args() already errors on an
    # empty command line, so this branch is likely never reached.
    if len(sys.argv) == 1:
        print_banner()
        # parser.error() prints usage and raises SystemExit, so the old
        # follow-up lines (print_usage without parentheses, sys.exit())
        # were unreachable dead code and have been removed.
        parser.error("No arguments given.")

    # Bug fix: str.endswith needs a *tuple* to test several suffixes; the
    # original `endswith('/' or '\\')` evaluated to endswith('/') and
    # never stripped a trailing backslash.
    if arguments.output_directory.endswith(('/', '\\')):
        arguments.output_directory = arguments.output_directory[:-1]
    if arguments.target_hosts.endswith(('/', '\\')):
        arguments.target_hosts = arguments.target_hosts[:-1]

    if arguments.quiet is not True:
        print_banner()
    if arguments.ping_sweep is True:
        print("[#] Performing ping sweep")
        ping_sweeper(arguments.target_hosts, arguments.output_directory,
                     arguments.quiet)
    if arguments.hostname_scan is True:
        print("[#] Identifying hostnames")
        hostname_scan(arguments.target_hosts, arguments.output_directory,
                      arguments.quiet)
    if arguments.find_dns_servers is True:
        print("[#] Identifying DNS Servers")
        find_dns(arguments.target_hosts, arguments.output_directory,
                 arguments.quiet)
    if arguments.perform_service_scan is True:
        print("[#] Performing service scans")
        # DNS-server list is only forwarded when DNS discovery actually ran.
        if arguments.find_dns_servers is True:
            service_scan(arguments.target_hosts, arguments.output_directory,
                         arguments.find_dns_servers, arguments.quiet,
                         arguments.quick)
        else:
            service_scan(arguments.target_hosts, arguments.output_directory,
                         '', arguments.quiet, arguments.quick)
    if arguments.perform_snmp_walk is True:
        print("[#] Performing SNMP walks")
        snmp_walk(arguments.target_hosts, arguments.output_directory,
                  arguments.quiet)
Пример #56
0
def main():
    """Command-line entry point for the serial terminal.

    Parses options, configures FTDI logging, optionally swaps in a
    virtual USB backend, then runs a MiniTerm session on the selected
    serial device.  Exits with status 1 on I/O or value errors and 2 on
    keyboard interrupt.
    """
    # Tracked outside the try block so the exception handlers can decide
    # whether to print the full traceback even if parsing failed early.
    debug = False
    try:
        default_device = get_default_device()
        # The module docstring doubles as the --help description.
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('-f', '--fullmode', dest='fullmode',
                                   action='store_true',
                                   help='use full terminal mode, exit with '
                                        '[Ctrl]+B')
        argparser.add_argument('device', nargs='?', default=default_device,
                               help='serial port device name (default: %s)' %
                               default_device)
        argparser.add_argument('-b', '--baudrate',
                               help='serial port baudrate (default: %d)' %
                               MiniTerm.DEFAULT_BAUDRATE,
                               default='%s' % MiniTerm.DEFAULT_BAUDRATE)
        argparser.add_argument('-w', '--hwflow',
                               action='store_true',
                               help='hardware flow control')
        argparser.add_argument('-e', '--localecho',
                               action='store_true',
                               help='local echo mode (print all typed chars)')
        argparser.add_argument('-r', '--crlf',
                               action='count', default=0,
                               help='prefix LF with CR char, use twice to '
                                    'replace all LF with CR chars')
        argparser.add_argument('-l', '--loopback',
                               action='store_true',
                               help='loopback mode (send back all received '
                                    'chars)')
        argparser.add_argument('-s', '--silent', action='store_true',
                               help='silent mode')
        argparser.add_argument('-P', '--vidpid', action='append',
                               help='specify a custom VID:PID device ID, '
                                    'may be repeated')
        argparser.add_argument('-V', '--virtual', type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v', '--verbose', action='count',
                               help='increase verbosity')
        argparser.add_argument('-d', '--debug', action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # Each -v lowers the log threshold by 10 (one logging level),
        # clamped to the [DEBUG, ERROR] range.
        loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
                                  '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        # Register any user-supplied VID:PID pairs with the Ftdi driver;
        # malformed values are reported as argument errors.
        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        miniterm = MiniTerm(device=args.device,
                            baudrate=to_bps(args.baudrate),
                            parity='N',
                            rtscts=args.hwflow,
                            debug=args.debug)
        # Blocks until the terminal session ends.
        miniterm.run(args.fullmode, args.loopback, args.silent, args.localecho,
                     args.crlf)

    except (IOError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        sysexit(1)
    except KeyboardInterrupt:
        sysexit(2)
Пример #57
0
def parse_args(argv):
    """Parse the xconv command line.

    Args:
        argv: full argument vector, including the program name at index 0.

    Returns:
        The argparse namespace, augmented with ``output``,
        ``output_directory`` and ``output_filename`` describing where
        results should be written.
    """
    prog = basename(argv[0])

    # When invoked as "python -m xconv", argv[0] is the package __main__.py.
    if prog == "__main__.py":
        prog = "python -m xconv"

    parser = ArgumentParser(
        prog=prog,
        usage="""%(prog)s [-h | -l | -i PROFILE]
       %(prog)s [option]... -p PROFILE [-DNAME[=VALUE]]... [-B] [-T] input output
       %(prog)s [option]... -p PROFILE [-DNAME[=VALUE]]...  -M  [-T] inputs... output
       %(prog)s [option]... -p PROFILE [-DNAME[=VALUE]]...  -C  [-T] inputs... output
       %(prog)s [option]... -p PROFILE [-DNAME[=VALUE]]... [-B] inputs... directory
       %(prog)s [option]... -p PROFILE [-DNAME[=VALUE]]... [-B] -t directory inputs...""",
        description="""FFmpeg wrapper based on AdvancedAV""")

    parser.add_argument("-V",
                        "--version",
                        help="Show version and quit",
                        action="version",
                        version="""XConv %s""" % version)

    # Available Options
    parser.add_argument("-v",
                        "--verbose",
                        help="Enable verbose output",
                        action="store_true")
    parser.add_argument("-q",
                        "--quiet",
                        help="Be less verbose",
                        action="store_true")
    parser.add_argument(
        "-j",
        "--concurrent",
        help="Run ffmpeg concurrently using at most N instances [%(default)s]",
        metavar="N",
        default=cpu_count())
    profile = parser.add_argument_group("Profile")
    profile.add_argument("-l",
                         "--list-profiles",
                         help="List profiles and quit",
                         action=ProfilesAction)
    profile.add_argument("-i",
                         "--profile-info",
                         help="Give info about a profile and quit",
                         metavar="PROFILE",
                         action=ProfileInfoAction)
    profile.add_argument("-p",
                         "--profile",
                         help="Specify the profile",
                         metavar="PROFILE",
                         required=True)
    profile.add_argument("-D",
                         "--define",
                         help="Define an option to be used by the profile",
                         metavar="NAME[=VALUE]",
                         action=DefineAction)
    mode = parser.add_argument_group("Mode").add_mutually_exclusive_group()
    mode.add_argument(
        "-B",
        "--batch",
        help="Batch process every input file into an output file (default)",
        action="store_true")
    mode.add_argument("-M",
                      "--merge",
                      help="Merge streams from all inputs",
                      action="store_true")
    mode.add_argument("-C",
                      "--concat",
                      help="Concatenate streams from inputs",
                      action="store_true")
    files = parser.add_argument_group("Files")
    files.add_argument("inputs", help="The input file(s)", nargs="+")
    files.add_argument(
        "output",
        help="The output filename or directory (unless -t is given)",
        nargs="?")  # always empty
    files.add_argument("-u",
                       "--update",
                       help="Only work on files that don't already exist",
                       action="store_true")
    files.add_argument("-c",
                       "--create-directory",
                       help="Create directories if they don't exist",
                       action="store_true")
    target = files.add_mutually_exclusive_group()
    target.add_argument("-t",
                        "--target-directory",
                        help="Output into a directory",
                        metavar="DIRECTORY",
                        type=Path)
    target.add_argument("-T",
                        "--no-target-directory",
                        help="Treat output as a normal file",
                        action="store_true")
    files.add_argument(
        "-S",
        "--subdirectory",
        help=
        "Work in a subdirectory of here and -t (use glob patterns for inputs)")
    files.add_argument("-K",
                       "--copy-files",
                       help="Copy all following files unmodified",
                       metavar="FILE",
                       action=ExtendAction)
    progs = parser.add_argument_group("Programs")
    progs.add_argument("--ffmpeg",
                       help="Path to the ffmpeg executable",
                       default="ffmpeg")
    progs.add_argument("--ffprobe",
                       help="Path to the ffprobe executable",
                       default="ffprobe")

    # Parse arguments
    args = parser.parse_args(argv[1:])

    # Figure out output path
    # ----------------------
    # args.output is never filled in by argparse, since "inputs" consumes
    # every positional; recover it from the inputs list instead.
    if args.target_directory:
        args.output = args.target_directory
    elif len(args.inputs) < 2:
        parser.error("Neither --target-directory nor output is given")
    else:
        args.output = Path(args.inputs.pop(-1))

    if args.subdirectory:
        subdir = Path(args.subdirectory)  #.resolve()
        outdir = Path(args.output, args.subdirectory)  #.resolve()

        if outdir.exists() and not outdir.is_dir():
            # Bug fix: the '%s' placeholder was never substituted; format
            # in the offending path so the error names it.
            parser.error(
                "--subdirectory only works with output directories. '%s' exists and isn't a directory"
                % outdir)

        # Inputs are glob patterns relative to the subdirectory.
        inputs = args.inputs
        args.inputs = []
        for pattern in inputs:
            args.inputs.extend(subdir.glob(pattern))

        # Bug fix: copy_files may be None when -K was never given
        # (presumably ExtendAction has no default — TODO confirm);
        # guard so the loop doesn't raise TypeError.
        files = args.copy_files or []
        args.copy_files = []
        for pattern in files:
            args.copy_files.extend(subdir.glob(pattern))

        args.output_directory = args.output = outdir
        args.output_filename = None

    else:
        # Check if we're outputting to a directory: copying extra files or
        # batch-processing several inputs yields multiple outputs
        # (merge/concat collapse all inputs into one).
        multiple_outputs = args.copy_files or not (
            args.merge or args.concat) and len(args.inputs) > 1

        if args.target_directory or args.output.is_dir() or multiple_outputs:
            if args.no_target_directory:
                if multiple_outputs:
                    parser.error(
                        "Passed --no-target-directory, but operation would have multiple outputs. (See --merge or --concat)"
                    )
                else:
                    parser.error(
                        "Passed --no-target-directory, but '%s' is an existing directory."
                        % args.output)
            args.output_filename = None
            args.output_directory = args.output
        else:
            args.output_filename = args.output.name
            args.output_directory = args.output.parent

    return args
Пример #58
0
    '--convert',
    help='Convert files between Cantera and Chemkin formats (.cti <=> .inp)',
    action='store_true',
    default=False,
)
parser.add_argument(
    '--thermo',
    help='thermodynamic data filename (only necessary for CHEMKIN files).',
    type=str)
parser.add_argument(
    '--transport',
    help='transport data filename (only necessary for CHEMKIN files).',
    type=str)

args = parser.parse_args()

if args.run_sa and args.epsilon_star is None:
    parser.error('--run_sa requires --epsilon_star.')

if args.convert:
    # Convert model and exit
    convert(args.model, args.thermo, args.transport)
else:
    # Check for Chemkin format and convert if needed
    if splitext(args.model)[1] in ['.inp', '.dat', '.txt']:
        warn('Chemkin file detected; converting before reduction.')
        args.model = convert(args.model, args.thermo_file, args.transport_file)

    pymars(args.model, args.conditions, args.error, args.method, args.targets,
           args.retained_species, args.run_sa, args.epsilon_star)
Пример #59
0
def get_dl_average_arguments(argv=None):
    """Build and parse the command-line options for orientation averaging.

    Parses *argv* (defaults to ``sys.argv[1:]``), verifies that the
    station database file exists, splits the comma-separated station
    keys into a list, and returns the resulting namespace.
    """
    argparser = ArgumentParser(
        usage="%(prog)s [arguments] <Station Database>",
        description="Program to average the orientations of the seismometer in a station database.")

    argparser.add_argument("indb", type=str,
                           help="Station Database to process from.")
    argparser.add_argument("-v", "--verbose", dest="verb", type=int, default=2,
                           help="Enable Level of verbose output during processing. (0) No Output; (1) Output Event Analysis counter; (2) Counter and results. Default 2")
    argparser.add_argument("--load-location", dest="loadloc", type=str, default="DL_RESULTS",
                           help="Specify Load destination. [Default is DL_RESULTS (and sub-directories based on Station Name)]")
    argparser.add_argument("--plot", dest="showplot", action="store_true", default=False,
                           help="Plot results at end [Default False]")
    argparser.add_argument("--save", dest="saveplot", action="store_true", default=False,
                           help="Set this option if you wish to save the figure. [Default does not save figure]")
    # NOTE(review): the help text runs "valid" and "matplotlib" together;
    # kept byte-for-byte to preserve the original output.
    argparser.add_argument("--format", dest="fmt", type=str, default="png",
                           help="Specify format of figure. Can be any one of the validmatplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
    argparser.add_argument("--cc", dest="cc", type=float, default=0.8,
                           help="Cross-correlation threshold for final estimate. [Default 0.8]")

    # Options restricting processing to particular stations.
    stparm = argparser.add_argument_group(
        title="Station Selection Parameters",
        description="Parameters to select a specific station.")
    stparm.add_argument("--keys", dest="stkeys", type=str, default="",
                        help="Specify list of Station Keys in the database to process.")

    args = argparser.parse_args(argv)

    # The station database must exist before any processing can start.
    if not exist(args.indb):
        argparser.error(f"Input file {args.indb} does not exist")

    # Comma-separated keys become a list; the empty default stays "".
    if args.stkeys:
        args.stkeys = args.stkeys.split(',')

    return args
Пример #60
0
def main():
    parser = ArgumentParser(
        description="Generate cohorts and run models in openSAFELY framework. "
    )
    # Cohort parser options
    parser.add_argument("--version",
                        help="Display version",
                        action="store_true")
    parser.add_argument("--verbose",
                        help="Show extra logging info",
                        action="store_true")
    subparsers = parser.add_subparsers(help="sub-command help")
    generate_cohort_parser = subparsers.add_parser("generate_cohort",
                                                   help="Generate cohort")
    generate_cohort_parser.set_defaults(which="generate_cohort")
    generate_measures_parser = subparsers.add_parser(
        "generate_measures", help="Generate measures from cohort data")
    generate_measures_parser.set_defaults(which="generate_measures")
    generate_codelist_report_parser = subparsers.add_parser(
        "generate_codelist_report",
        help="Generate OpenSAFELY Interactive codelist report",
    )
    generate_codelist_report_parser.set_defaults(
        which="generate_codelist_report")
    cohort_report_parser = subparsers.add_parser("cohort_report",
                                                 help="Generate cohort report")
    cohort_report_parser.set_defaults(which="cohort_report")
    cohort_report_parser.add_argument(
        "--input-dir",
        help="Location to look for input CSVs",
        type=str,
        default="analysis",
    )
    cohort_report_parser.add_argument(
        "--output-dir",
        help="Location to store output CSVs",
        type=str,
        default="output",
    )

    dump_cohort_sql_parser = subparsers.add_parser(
        "dump_cohort_sql", help="Show SQL to generate cohort")
    dump_cohort_sql_parser.add_argument("--study-definition",
                                        help="Study definition name",
                                        type=str,
                                        required=True)
    dump_cohort_sql_parser.set_defaults(which="dump_cohort_sql")
    dump_study_yaml_parser = subparsers.add_parser(
        "dump_study_yaml", help="Show study definition as YAML")
    dump_study_yaml_parser.set_defaults(which="dump_study_yaml")
    dump_study_yaml_parser.add_argument("--study-definition",
                                        help="Study definition name",
                                        type=str,
                                        required=True)
    # Cohort parser options
    generate_cohort_parser.add_argument(
        "--output-dir",
        help="Location to store output files",
        type=str,
        default="output",
    )
    generate_cohort_parser.add_argument(
        "--output-format",
        help=(f"Output file format: {SUPPORTED_FILE_FORMATS[0]} (default),"
              f" {', '.join(SUPPORTED_FILE_FORMATS[1:])}"),
        type=str,
        choices=SUPPORTED_FILE_FORMATS,
        default=SUPPORTED_FILE_FORMATS[0],
    )
    generate_cohort_parser.add_argument(
        "--study-definition",
        help="Study definition to use",
        type=str,
        choices=["all"] +
        [x[0] for x in list_study_definitions(ignore_errors=True)],
        default="all",
    )
    generate_cohort_parser.add_argument(
        "--temp-database-name",
        help="Name of database to store temporary results",
        type=str,
        default=os.environ.get("TEMP_DATABASE_NAME", ""),
    )
    generate_cohort_parser.add_argument(
        "--index-date-range",
        help="Evaluate the study definition at a range of index dates",
        type=str,
        default="",
    )
    generate_cohort_parser.add_argument(
        "--skip-existing",
        help="Do not regenerate data if output file already exists",
        action="store_true",
    )
    generate_cohort_parser.add_argument(
        "--with-end-date-fix",
        action="store_true",
    )
    cohort_method_group = generate_cohort_parser.add_mutually_exclusive_group()
    cohort_method_group.add_argument(
        "--expectations-population",
        help="Generate a dataframe from study expectations",
        type=int,
        default=0,
    )
    cohort_method_group.add_argument(
        "--dummy-data-file",
        help="Use dummy data from file",
        type=pathlib.Path,
    )
    cohort_method_group.add_argument(
        "--database-url",
        help=
        "Database URL to query (can be supplied as DATABASE_URL environment variable)",
        type=str,
    )

    # Measure generator parser options
    generate_measures_parser.add_argument(
        "--output-dir",
        help="Location to store output files",
        type=str,
        default="output",
    )
    generate_measures_parser.add_argument(
        "--study-definition",
        help="Study definition file containing measure definitions to use",
        type=str,
        choices=["all"] +
        [x[0] for x in list_study_definitions(ignore_errors=True)],
        default="all",
    )
    generate_measures_parser.add_argument(
        "--skip-existing",
        help="Do not regenerate measure if output file already exists",
        action="store_true",
    )

    # Codelist report parser options
    generate_codelist_report_parser.add_argument(
        "--output-dir",
        help="Location to store output files",
        type=str,
        default="output",
    )
    generate_codelist_report_parser.add_argument(
        "--codelist-path",
        help="Location of codelist",
        type=str,
    )
    # Date bounds for the codelist report.  `datetime.date.fromisoformat`
    # means argparse itself rejects anything that isn't "YYYY-MM-DD" with a
    # usage error, and downstream code receives real `date` objects.
    generate_codelist_report_parser.add_argument(
        "--start-date",
        help="Start date",
        type=datetime.date.fromisoformat,
    )
    generate_codelist_report_parser.add_argument(
        "--end-date",
        help="End date",
        type=datetime.date.fromisoformat,
    )

    # "maintenance" subcommand: checks whether the backend database is in a
    # maintenance window.  `which="maintenance"` is how the dispatch below
    # identifies the chosen subcommand.
    maintenance_parser = subparsers.add_parser(
        "maintenance",
        help="Report if backend db is currently performing maintenance",
    )
    maintenance_parser.set_defaults(which="maintenance")
    maintenance_parser.add_argument(
        "--database-url",
        help=
        "Database URL to query (can be supplied as DATABASE_URL environment variable)",
    )
    maintenance_parser.add_argument(
        "--current-mode",
        help="The current mode we think the database is in",
        default="unknown",
    )

    options = parser.parse_args()

    # Dispatch on the `which` attribute that each subparser set via
    # `set_defaults`.  `--version` is handled first; if no subcommand was
    # given, `which` is absent and we fall back to printing help.
    if options.version:
        print(f"v{cohortextractor.__version__}")
    elif not hasattr(options, "which"):
        parser.print_help()
    elif options.which == "generate_cohort":
        # This defaults to False for now (when --with-end-date-fix is not provided) so that
        # study outputs don't change unexpectedly.  After some more investigation, we'll
        # change the default to True (via --without-end-date-fix).
        flags.WITH_END_DATE_FIX = bool(options.with_end_date_fix)

        # CLI options win over (and are written through to) the environment,
        # so code further down that reads os.environ sees the same values.
        if options.database_url:
            os.environ["DATABASE_URL"] = options.database_url
        if options.temp_database_name:
            os.environ["TEMP_DATABASE_NAME"] = options.temp_database_name
        # A cohort needs exactly one data source: expectations, a dummy data
        # file, or a real database (flag or DATABASE_URL env var).
        if not (options.expectations_population or options.dummy_data_file
                or os.environ.get("DATABASE_URL")):
            parser.error(
                "generate_cohort: error: one of the arguments "
                "--expectations-population --dummy-data-file --database-url is required"
            )
        try:
            generate_cohort(
                options.output_dir,
                options.expectations_population,
                options.dummy_data_file,
                selected_study_name=options.study_definition,
                index_date_range=options.index_date_range,
                skip_existing=options.skip_existing,
                output_format=options.output_format,
            )
        except DummyDataValidationError as e:
            # Dummy-data problems are user errors, not crashes: print a
            # short message (no traceback) and exit 1.
            print(f"Dummy data error: {e}")
            sys.exit(1)
        except Exception as e:
            # Checking for "DatabaseError" in the MRO means we can identify database errors without
            # referencing a specific driver.  Both pymssql and presto/trino-python-client raise
            # exceptions derived from a DatabaseError parent class

            # Ignore errors which don't look like database errors
            if "DatabaseError" not in str(e.__class__.mro()):
                raise

            traceback.print_exc()
            # Exit with specific exit codes to help identify known issues:
            #   3 = intermittent connection drop ("Unexpected EOF"),
            #   4 = CodedEvent_SNOMED table missing (likely db maintenance),
            #   5 = any other database error.
            if "Unexpected EOF from the server" in str(e):
                logger.error(f"Intermittent database error: {e}")
                sys.exit(3)
            if "Invalid object name 'CodedEvent_SNOMED'" in str(e):
                logger.error(
                    "CodedEvent_SNOMED table is currently not available.\n"
                    "This is likely due to regular database maintenance.")
                sys.exit(4)
            logger.error(f"Database error: {e}")
            sys.exit(5)

    elif options.which == "generate_measures":
        generate_measures(
            options.output_dir,
            selected_study_name=options.study_definition,
            skip_existing=options.skip_existing,
        )
    elif options.which == "generate_codelist_report":
        generate_codelist_report(
            options.output_dir,
            options.codelist_path,
            options.start_date,
            options.end_date,
        )
    elif options.which == "cohort_report":
        make_cohort_report(options.input_dir, options.output_dir)
    elif options.which == "dump_cohort_sql":
        dump_cohort_sql(options.study_definition)
    elif options.which == "dump_study_yaml":
        dump_study_yaml(options.study_definition)
    elif options.which == "maintenance":
        # Same CLI-flag-to-env-var write-through as generate_cohort above.
        if options.database_url:
            os.environ["DATABASE_URL"] = options.database_url
        check_maintenance(options.current_mode)