Example #1
def main():
  parser = ArgumentParser(prog='logfire', usage='%(prog)s [options]')

  parser.add_argument('--listen',
    help="Listen for log messages published on Redis channel 'logfire'",
    nargs='?', default=False, const=True)
  parser.add_argument('--serve',
    help="Run HTTP server to analyze logs from browser",
    nargs='?', default=False, const=True)
  parser.add_argument('--tail',
    help="Tail logs",
    nargs='?', default=False, const=True)
  parser.add_argument('--mongohost',
    help="hostname:port for MongoDB")

  parser.add_argument('-p','--port', help="Port for --serve",
    type=int, default=7095)
  parser.add_argument('--comp', help="Component filter for --tail")

  parser.add_argument('--cookiesecret',
    help="Cookie secret if authentication is enabled")
  parser.add_argument('--authgmaillist',
    help="Comma separated list of gmail accounts authorized to access")

  args = parser.parse_args()

  if args.listen:
    listen(args)
  elif args.serve:
    serve(args)
  elif args.tail:
    tail(args)
  else:
    parser.print_help()
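A note on the pattern above: the --listen/--serve/--tail flags use nargs='?' with const=True so that each flag acts as a switch that may also accept a value. If no optional value is ever needed, a shorter equivalent is action='store_true'. A minimal sketch (illustrative only, not the project's code):

from argparse import ArgumentParser

def main_sketch():
    # Minimal sketch: plain boolean switches instead of nargs='?'/const=True.
    parser = ArgumentParser(prog='logfire')
    parser.add_argument('--listen', action='store_true',
                        help="Listen for log messages published on Redis channel 'logfire'")
    parser.add_argument('--serve', action='store_true',
                        help="Run HTTP server to analyze logs from browser")
    parser.add_argument('--tail', action='store_true', help="Tail logs")
    args = parser.parse_args()
    if not (args.listen or args.serve or args.tail):
        parser.print_help()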
Example #2
def main():
    parser = ArgumentParser()
    parser_with_options = set_options(parser)
    args = parser_with_options.parse_args()
    arguments = vars(args)
    if 'help' in arguments:
        parser.print_help()
    else:
        config = Config()
        config.format = arguments['output']
        config.total_lists = arguments['lists']
        config.total_words = arguments['number']

        if arguments['file'] is None:
            current_path = os.path.abspath(
                inspect.getfile(inspect.currentframe())
                )
            dir = os.path.dirname(current_path)
            config.file_path = os.path.join(dir, config.internal_words_file)
        else:
            config.file_path = arguments['file']

    words = generate(config)

    if words:
        print(words)

    exit(0)
Example #3
def main():
  parser = ArgumentParser('--nmap_xml, '
                          '--openvas_xml')

  parser.add_argument('--nmap_xml',
                      dest='nmap_xml',
                      type=str,
                      help='NMAP XML file to parse.')

  parser.add_argument('--openvas_xml',
                      dest='openvas_xml',
                      type=str,
                      help='OpenVAS XML file to parse.')

  args = parser.parse_args()
  nmap_xml = args.nmap_xml
  openvas_xml = args.openvas_xml

  if nmap_xml is not None:

    parse_nmap_xml(nmap_xml)

  if openvas_xml is not None:

    parse_openvas_xml(openvas_xml)

  if openvas_xml is None and nmap_xml is None:
    print('\nI need arguments.\n')
    parser.print_help()
    exit()
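The "I need arguments" branch above can also be delegated to argparse itself: parser.error() prints the usage line plus a message and exits with status 2. A minimal sketch under that assumption (illustrative, not the original project's code):

from argparse import ArgumentParser

def main_sketch():
    parser = ArgumentParser()
    parser.add_argument('--nmap_xml', type=str, help='NMAP XML file to parse.')
    parser.add_argument('--openvas_xml', type=str, help='OpenVAS XML file to parse.')
    args = parser.parse_args()
    # parser.error() replaces the manual print + print_help() + exit() sequence.
    if args.nmap_xml is None and args.openvas_xml is None:
        parser.error('at least one of --nmap_xml or --openvas_xml is required')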
Example #4
def main():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--version', action='version', version=__version__)
    parser.add_argument('-g', dest='gist_id',
                        help='retrieve a paste identified by the gist id')
    parser.add_argument('-d', dest='description',
                        help='description of the gist')
    parser.add_argument('-p', dest='private', action='store_true',
                        help='set for private gist')
    parser.add_argument('-a', dest='anon', action='store_true',
                        help='set for anonymous gist')
    parser.add_argument('file', nargs='*', help='file to paste to gist')
    args = parser.parse_args()

    if args.gist_id:
        get_gist(args.gist_id)
        sys.exit()

    if sys.stdin.isatty() and not args.file:
        parser.print_help()
        sys.exit(1)

    if len(args.file) < 1:
        data = gen_request([sys.stdin], args.private, args.anon, args.description)
    else:
        data = gen_request(args.file, args.private, args.anon, args.description)

    with urlopen(site, data) as info:
        url = info.geturl()

    if copy_paste(url.encode('utf8')):
        print('{0} | copied to clipboard successfully.'.format(url))
    else:
        print('{0}'.format(url))
Example #5
def process():
    parser = ArgumentParser(description = \
            "Simulate the motion of a flock of birds")
    
    # Parameters
    parser.add_argument('--file', '-f', dest = 'configFile')

    # Print help message even if no flag is provided
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # Catch exception if file does not exist
    try:
        # Create object
        boids = Flock(args.configFile)
        # Plot figures
        animator = FlockAnimator((-500,1500), (-500,1500), "The Boids!", boids)
        animator.animate_flock()
    except IOError:
        print "The file you provided does not exist.\n" 
        parser.print_help()
    except:
        print "Unexpected error.", sys.exc_info()[0], "\n"
        raise
Example #6
def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument("-1", "--sha1", action="store_true")
    parser.add_argument("-2", "--sha224", action="store_true")
    parser.add_argument("-3", "--sha256", action="store_true")
    parser.add_argument("-4", "--sha384", action="store_true")
    parser.add_argument("-5", "--sha512", action="store_true")
    parser.add_argument("-f", "--file", type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ""

    big_file = open(args.file, "rb")
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree = hashtree + chunk_hash

    pool.terminate()

    if os.path.getsize(args.file) < 20971520:
        print(hashtree)
    else:
        print(str(hashing(hashtree)))
Example #7
def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument('-1', '--sha1', action='store_true')
    parser.add_argument('-2', '--sha224', action='store_true')
    parser.add_argument('-3', '--sha256', action='store_true')
    parser.add_argument('-4', '--sha384', action='store_true')
    parser.add_argument('-5', '--sha512', action='store_true')
    parser.add_argument('-f', '--file', type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ''

    big_file = open(args.file, 'rb')
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree += chunk_hash + ":hash"

    pool.terminate()

    print(str(hashing(hashtree.encode('ascii'))))
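Both SHA examples above call helpers named chunks() and hashing() that are defined elsewhere in their projects. A hypothetical sketch of what a chunks() generator could look like, assuming fixed-size binary reads (the real helper may differ):

def chunks(file_obj, chunk_size=1024 * 1024):
    # Hypothetical stand-in: yield successive fixed-size blocks from an open binary file.
    while True:
        data = file_obj.read(chunk_size)
        if not data:
            break
        yield data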
Example #8
def main():
    parser = ArgumentParser('Convert APIC encoded JSON to XML to JSON')
    parser.add_argument('-s', '--stdin', help='Parse input from stdin, for use as a filter, e.g., cat doc.xml | %s' %
        str(__file__), action='store_true', default=False, required=False)
    parser.add_argument('-f', '--file', help='File containing XML or JSON', required=False)
    args = parser.parse_args()

    if not args.file and not args.stdin:
        print('ERROR: You must specify at least -s or -f')
        print('')
        parser.print_help()
        sys.exit(1)

    if args.stdin:
        inputStr = sys.stdin.read()
        inputFileH = StringIO.StringIO(inputStr)

    if args.file:
        with open(args.file, 'r') as inputFileH:
            inputStr = inputFileH.read()
            inputFileH = StringIO.StringIO(inputStr)

    format = isXMLorJSON(inputStr)
    
    if format == 'xml':
        tree = ET.ElementTree(ET.fromstring(inputStr))
        print(converter().recurseXMLTree(tree.getroot()))
    elif format == 'json':
        jsondict = json.loads(inputStr)
        print(converter().recurseJSONDict(jsondict))
    else:
        raise IOError('Unsupported format passed as input. Please check that input is formatted correctly in JSON or XML syntax')
        sys.exit(1)
    sys.exit(0)
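isXMLorJSON() above is a project helper that is not shown here. A hypothetical sketch of such a probe, assuming detection by the first non-whitespace character only (the real implementation may be stricter):

def is_xml_or_json_sketch(text):
    # Hypothetical stand-in for isXMLorJSON(): look at the first non-whitespace character.
    stripped = text.lstrip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.startswith(('{', '[')):
        return 'json'
    return 'unknown'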
Example #9
def process():
    parser = ArgumentParser(description = "Flock of flying boids simulator")

    parser.add_argument('--file', '-f', dest = 'configFile')

    # Print help message even if no flag is provided
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
	

    # Catch exception if file does not exist
    try:
        # Create object
        boid = BoidFlock(args.configFile)
        # Plot figures
        figure = plt.figure()
        axes = plt.axes(xlim = (-500,1500), ylim = (-500,1500))
        scatter = axes.scatter(boid.position[0,:], boid.position[1,:])
        # Function handle for animate
        funcEval = lambda x: animate(boid, scatter)
        # Animate
        anim = animation.FuncAnimation(figure, funcEval, frames=50, interval=50)
        plt.show()
    except IOError:
        print "The file does not exist.\n" 
        parser.print_help()
    except:
        print "Unexpected error.\n"
Example #10
def main():
    """                                   /// WARNING ///
    The ThreatButt API tool embarks on a high speed, turbulent voyage with sudden turns and sharp drops.
    This tool employs safety restraints which may restrict certain guests from using due to their mental shape and size.
    You must possess the ability to remain in an upright position at all times while lying down.
    Persons with the following conditions should not use this tool:
    - Heart Condition or Abnormal Blood Pressure
    - Back, Neck or Similar Physical Condition
    - Expectant Parents
    - Motion Sickness or Dizziness
    - Media Sensitivity to Strobe Effects
    - Claustrophobia
    - Recent Surgery or Other Conditions That May Be Aggravated By This Tool
    """

    parser = ArgumentParser()
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('-i', '--ioc', dest='ioc', default=None,
                        help='[OPTIONAL] An IoC to attribute.')
    parser.add_argument('-m', '--md5', dest='md5', default=None,
                        help='[OPTIONAL] An MD5 hash.')
    parser.add_argument('--maltego', dest='maltego', default=False, action='store_true',
                        help='[OPTIONAL] Run in Maltego compatibility mode.')
    args, _ = parser.parse_known_args()

    tb = ThreatButt(args.maltego)

    if args.ioc:
        tb.clown_strike_ioc(args.ioc)

    elif args.md5:
        tb.bespoke_md5(args.md5)

    else:
        parser.print_help()
Example #11
def _getArguments():
    """ reads in arguments passed in, returns options """
    parser = ArgumentParser(description='tool to store/retrieve attributes in zookeeper')
    parser.add_argument('--debug', action='store_true', default=False, dest='debugMode', help='debug mode')
    parser.add_argument('--zks', nargs='*', dest='zks', default=None, help='space separated list of zookeepers in the format of hostname:port')
    parser.add_argument('--putnode', dest='putnode', default=None, help='full path of node (attribute) to create')
    parser.add_argument('--putdata', dest='putdata', default=None, help='data to put into node, enclosed in quotes')
    parser.add_argument('--getnode', dest='getnode', default=None, help='full path of node (attribute) to retrieve')
    parser.add_argument('--delnode', dest='delnode', default=None, help='full path of node (attribute) to delete')
    parser.add_argument('--subdomain', dest='subdomain', default=None, help='subdomain of the environment')
    parser.add_argument('--clusterinfo', action='store_true', default=False, dest='clusterinfo', help='display clusters info')

    if len(sys.argv)==1:
        parser.print_help()
        _die()

    options = parser.parse_args()

    # we need to connect somewhere, right?
    if not options.zks :
        _die('ERROR: zookeeper(s) needs to be specified')

    actions=[options.putnode, options.getnode, options.delnode, options.clusterinfo]
    numberOfActions = len([ action for action in actions if action ])

    if numberOfActions != 1:
        _die('ERROR: one and only one --getnode, --putnode, --delnode, or --clusterinfo needs to be specified')
    # if put is specified, make sure data is included
    elif options.clusterinfo and not options.subdomain :
        _die('ERROR: specify --subdomain for clusterinfo')
    elif options.putnode and not options.putdata :
        _die('ERROR: specify --putdata to put into node')

    return options
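The "one and only one action" check above can also be enforced by argparse. A minimal sketch, assuming the four actions may be declared as a required mutually exclusive group (illustrative only, not the project's code):

from argparse import ArgumentParser

def _getArguments_sketch():
    parser = ArgumentParser(description='tool to store/retrieve attributes in zookeeper')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--putnode', help='full path of node (attribute) to create')
    group.add_argument('--getnode', help='full path of node (attribute) to retrieve')
    group.add_argument('--delnode', help='full path of node (attribute) to delete')
    group.add_argument('--clusterinfo', action='store_true', help='display clusters info')
    # argparse now rejects zero or more than one of these options by itself.
    return parser.parse_args()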
Example #12
def _print_basic_help(option_parser, usage, include_deprecated=False):
    """Print all help for the parser. Unlike similar functions, this needs a
    parser so that it can include custom options added by a
    :py:class:`~mrjob.job.MRJob`.
    """
    help_parser = ArgumentParser(usage=usage, add_help=False)

    for action in option_parser._actions:
        if action.dest in _RUNNER_OPTS:
            continue

        if action.dest in _STEP_OPTS:
            continue

        if (action.dest in _DEPRECATED_NON_RUNNER_OPTS and
                not include_deprecated):
            continue

        if not action.option_strings:
            continue

        help_parser._add_action(action)

    help_parser.print_help()

    print()
    print('To see help for a specific runner, use --help -r <runner name>')
    print()
    print('To see help for options that control what part of a job runs,'
          ' use --help --steps')
    print()
    if not include_deprecated:
        print('To include help for deprecated options, add --deprecated')
        print()
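help_parser._add_action() above reaches into a private argparse API. A public way to share a set of options between parsers, assuming the shared options can be declared on their own parser, is the parents= parameter. A sketch with an illustrative --runner option (not mrjob's actual code):

from argparse import ArgumentParser

# Options shared between parsers live on a helper parser with add_help=False.
common = ArgumentParser(add_help=False)
common.add_argument('-r', '--runner', help='runner to use')

# parents= copies the shared actions into the new parser.
help_parser = ArgumentParser(add_help=False, parents=[common])
help_parser.print_help()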
Example #13
def main():
    from argparse import ArgumentParser

    description = """
Run polar2grid rescaling via the command line.  This is not the preferred
way to do production level rescaling, but is useful for testing.
"""
    parser = ArgumentParser(description=description)
    parser.add_argument("--doctest", dest="doctest", action="store_true", help="run document tests")
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbosity",
        action="count",
        default=0,
        help="each occurrence increases verbosity 1 level through ERROR-WARNING-INFO-DEBUG",
    )
    args = parser.parse_args()

    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=levels[min(3, args.verbosity)])

    if args.doctest:
        import doctest

        return doctest.testmod()

    print "Command line interface not implemented yet"
    parser.print_help()
Example #14
def process_opt():
    parser = ArgumentParser()

    parser.add_argument("-p", dest="profile", default=None, help="Option: profile sync|cdn|backup|idle|regular"
                                                               "example: ./executor.py -p sync")

    parser.add_argument("-o", dest="ops", default=10, help="Option: ops #"
                                                             "example: ./executor.py -o 5")

    parser.add_argument("-t", dest="itv", default=1, help="Option: itv #"
                                                             "example: ./executor.py -t 5")

    parser.add_argument("-f", dest="folder", default='stacksync_folder', help="Option: ftp folder, folder owncloud_folder|stacksync_folder "
                                                          "example: ./executor.py -f owncloud_folder")

    parser.add_argument("-x", dest="pid", default='StackSync', help="Option: ProcedureName, "
                                                                              "pid StackSync|OwnCloud "
                                                                              "example: ./executor.py -x OwnCloud")

    parser.add_argument("--out", dest="output", default='output', help="Folder for output files")
    opt = parser.parse_args()

    if not opt.itv:
        parser.print_help()
        print('Example: ./executor.py -o 100 -p sync -t 1 -f owncloud_folder -x OwnCloud')
        sys.exit(1)

    print(opt.profile)
    print(opt.ops)
    print(opt.itv)

    return opt
Example #15
def _print_help_for_runner(opt_names, include_deprecated=False):
    help_parser = ArgumentParser(usage=SUPPRESS, add_help=False)

    _add_runner_args(help_parser, opt_names,
                     include_deprecated=include_deprecated)

    help_parser.print_help()
Example #16
def parse_args():
    """
    """

    hash_types = ["nt", "lm"]
    parser = ArgumentParser(usage="[options]")
    parser.add_argument("-a", help="Add cracked hashes to database."
                                   " Format should be password : hash",
                        action='store', dest='add_list', metavar='[file]')
    parser.add_argument("-p", help="Add a list of plaintext credentials to the database",
                    action='store', dest='plaintext', metavar='[file]')
    parser.add_argument("-f", help="Hash format to create if using -a",
                    action='store', dest='hash_type', choices=hash_types)
    parser.add_argument("-k", help="Kill Doozer", action='store_true',
                    dest='kill')
    parser.add_argument("--startup", help="Startup Doozer", action='store_true',
                    dest='startup')
    parser.add_argument("--plaintext", help='Pull a list of all plaintext passwords',
                    action='store_true', dest='gplaintext')
    parser.add_argument("--test", help="Test connection to Doozer",
                    action='store_true', dest='test')
    parser.add_argument("--archive", help="Archive all jobs",
                    action='store_true', dest='archive')

    options = parser.parse_args()
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)

    return options
Example #17
def main():
    global USE_OVR
    game_names = [name[6:] for _, name, _ in pkgutil.walk_packages(['larch'])
                     if name.startswith('games.')]

    from argparse import ArgumentParser
    parser = ArgumentParser()

    parser.add_argument(
            '--ovr', action='store_true', help='Play game with Oculus Rift')
    parser.add_argument(
            '--game',
            action='store',
            help='Specify the game to run. List of games: {}'.format(
                game_names))

    parsed_args = parser.parse_args(sys.argv[1:])

    USE_OVR = parsed_args.ovr
    InterfaceClass = Interface
    if USE_OVR:
        InterfaceClass = OVRInterface

    # See if --game flag matches a module in the games package.
    Game = None
    if parsed_args.game in game_names:
        module = importlib.import_module('games.{}'.format(parsed_args.game))
        Game = module.new(InterfaceClass)

    if Game:
        with Game() as game:
            game.run()
    else:
        parser.print_help()
Example #18
def main():
    parser = ArgumentParser(description = 'PGObserver testdata generator')
    parser.add_argument('-c', '--config', help = 'Path to config file. (default: %s)' % DEFAULT_CONF_FILE, dest="config" , default = DEFAULT_CONF_FILE)
    parser.add_argument('-gts','--generate-x-tables',help='Number of tables', dest="gt",default=5)
    parser.add_argument('-gps','--generate-x-procs',help='Number of stored procedures', dest="gp",default=12)
    parser.add_argument('-gds','--generate-x-days',help='Number of days', dest="gd",default=5)
    parser.add_argument('-giv','--generate-interval',help='Interval between data in minutes', dest="gi",default=5)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print('Configuration file missing:', args.config)
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    print "PGObserver testdata generator:"
    print "=============================="
    print ""
    print "Setting connection string to ... " + settings['database']['url']
    print ""
    print "Creating " + str(args.gt) + " tables"
    print "Creating " + str(args.gp) + " stored procedures"
    print "Creating " + str(args.gd) + " days of data"
    print "Creating data points every " + str(args.gi) + " minutes"
    print ""

    generate_test_data(settings['database']['url'] , args.gt , args.gp,  args.gd,  args.gi)
Example #19
def load(*argv):
    usage = """
    Load an image on selected nodes in parallel
    {resa}
    """.format(resa=reservation_required)
    the_config = Config()
    the_imagesrepo = ImagesRepo()
    default_image = the_imagesrepo.default()
    default_timeout = the_config.value('nodes', 'load_default_timeout')
    default_bandwidth = the_config.value('networking', 'bandwidth')

    parser = ArgumentParser(usage=usage)
    parser.add_argument("-i", "--image", action='store', default=default_image,
                        help="Specify image to load (default is {})"
                             .format(default_image))
    parser.add_argument("-t", "--timeout", action='store',
                        default=default_timeout, type=float,
                        help="Specify global timeout for the whole process, default={}"
                              .format(default_timeout))
    parser.add_argument("-b", "--bandwidth", action='store',
                        default=default_bandwidth, type=int,
                        help="Set bandwidth in Mibps for frisbee uploading - default={}"
                              .format(default_bandwidth))
    parser.add_argument("-c", "--curses", action='store_true', default=False,
                        help="Use curses to provide term-based animation")
    # this is more for debugging
    parser.add_argument("-n", "--no-reset", dest='reset',
                        action='store_false', default=True,
                        help="""use this with nodes that are already
                        running a frisbee image. They won't get reset,
                        neither before nor after the frisbee session""")
    add_selector_arguments(parser)
    args = parser.parse_args(argv)

    message_bus = asyncio.Queue()

    selector = selected_selector(args)
    if not selector.how_many():
        parser.print_help()
        return 1
    nodes = [Node(cmc_name, message_bus) for cmc_name in selector.cmc_names()]

    # send feedback
    message_bus.put_nowait({'selected_nodes': selector})
    from rhubarbe.logger import logger
    logger.info("timeout is {}s".format(args.timeout))
    logger.info("bandwidth is {} Mibps".format(args.bandwidth))

    actual_image = the_imagesrepo.locate_image(args.image, look_in_global=True)
    if not actual_image:
        print("Image file {} not found - emergency exit".format(args.image))
        exit(1)

    # send feedback
    message_bus.put_nowait({'loading_image': actual_image})
    display_class = Display if not args.curses else DisplayCurses
    display = display_class(nodes, message_bus)
    loader = ImageLoader(nodes, image=actual_image, bandwidth=args.bandwidth,
                         message_bus=message_bus, display=display)
    return loader.main(reset=args.reset, timeout=args.timeout)
Example #20
def main():

    parser = ArgumentParser(description='Store the facebook data based on the keywords from the file.')
    parser.add_argument("-f", "--keyword_file", type=str, help="Input the keyword file after -f")
    parser.add_argument("-o", "--output", type=str, help="Input the output path after -f")
    parser.add_argument("-n", "--max_number", type=int, help="The maximum number of post to return after -n")

    options = parser.parse_args()

    search_file_name = options.keyword_file
    if search_file_name is None:
        parser.print_help()
        exit(1)
    keywords = readSearchTerm(search_file_name)

    output = options.output
    file_name = getFileName(output)
     
    max_number = options.max_number
    if max_number is None:
        max_number = 1000000000 
    
    since = '2012-01-01' 
    until = None 

    lastResults = 1
    totalResults = 0

    while totalResults < max_number and lastResults > 0:

        for keyword in keywords:

            addParms = '&q=' + keyword
            if (since): addParms += '&since=' + str(since)
            if (until): addParms += '&until=' + str(until)
            addParms += '&' + access_token
            print "URL QUERY: " + urlQuery + addParms

            results = readJSONUrl(urlQuery + addParms)['data']

            gzip_out = gzip.open(file_name, 'ab+')

            for result in results:
                json_object = json.dumps(result)
                gzip_out.write(json_object)
                gzip_out.write('\n')

            gzip_out.close()

            lastResults = len(results)
            totalResults += lastResults
            print('got results: ' + str(lastResults) + "   Total: " + str(totalResults))

            if lastResults:

                earLiestTime = results[-1]['created_time']
                print('earliest time: ' + str(earLiestTime))
                earLiestTime = stripPlusTime(earLiestTime)
                print('earliest time: ' + str(earLiestTime))
                until = decreaseTime(earLiestTime)
Example #21
def main():
    """
    Find sha512sum's in archives.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-n", "--name", dest="name",
        help="name or regex of the file(s) to look for", metavar="NAME")
    parser.add_argument(
        'paths', metavar='PATHS', type=str, nargs='+',
        help='Paths to look in')

    args = parser.parse_args()

    try:
        rx = re.compile(args.name)
    except Exception:
        parser.print_help()
        print('\nYou must provide a string or valid regex with --name/-n')
        raise SystemExit(1)

    finder = PackageFinder(look_inside=True)
    hasher = HashGenerator()

    # For each path ...
    for check_path in args.paths:
        data, formats = finder(check_path)
        for package in data:
            result = rx.findall(os.path.basename(package.name))
            if result:
                print("- " + " ".join([hasher(package.path), str(package)]))
Example #22
def parse_arguments():
	"""	Parses the given command line options and arguments
	   	Returns the parsed options as an argparse Namespace. """
	
	use = "python origin.py -f <file.fasta> [options]"
	parser = ArgumentParser(usage=use)

	parser.add_argument('-f', '--fasta', dest='in_fa_filename', 
		help = "Required input FASTA (.fasta/.fa) file")
	parser.add_argument('-o', '--out', dest='out_filename', default='adjusted',
		help = "Basename of rearranged FASTA output file [adjusted]")
	parser.add_argument('-s', '--size', dest='size', type=int, default=524288, 
		help = "Starting window size. Decreases by 2 every iteration [524288]")
	parser.add_argument('-d', '--debug', dest='debug', action='store_true', 
		default=False, help = "Prints out all intermediate files obtained [False]")

	# Specify which method is desired
	parser.add_argument('-m', '--multiscale', dest='use_multiscale', action='store_true', 
		default=True, help = "Use the multiscaling method (Recommended) [True]")
	parser.add_argument('-w', '--wavelet', dest='use_wavelet', action='store_true', 
		default=False, help = "Use the wavelet transform method instead [False]")

	options = parser.parse_args()	

	# FASTA file REQUIRED. Display error and exit if not given in command line
	if not options.in_fa_filename:
		parser.print_help()
		print "\nERROR: Fasta file not given"
		sys.exit()

	return options
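The manual "Fasta file not given" check above can also be expressed declaratively, since argparse supports required=True on optional arguments. A minimal sketch under that assumption (illustrative, not the original script):

from argparse import ArgumentParser

def parse_arguments_sketch():
    parser = ArgumentParser(usage="python origin.py -f <file.fasta> [options]")
    # required=True makes argparse itself report the missing -f/--fasta error.
    parser.add_argument('-f', '--fasta', dest='in_fa_filename', required=True,
                        help="Required input FASTA (.fasta/.fa) file")
    return parser.parse_args()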
Example #23
def process():
    parser = ArgumentParser(
        description="Generating a greengraph by using a start point, an end point, number of steps and the name of the file you want the graph to be stored at."
    )

    """
    Parameters
    """
    parser.add_argument("--from", "-f", dest="fromCity")
    parser.add_argument("--to", "-t", dest="toCity")
    parser.add_argument("--steps", "-s")
    parser.add_argument("--out", "-o")

    """
    Print help message even if no flag is provided
    """
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    myGraph = Greengraph(args.fromCity, args.toCity)
    data = myGraph.green_between(args.steps)
    plt.plot(data)
    plt.savefig(args.out)
Example #24
def main():
    from argparse import ArgumentParser
    import inspect

    from openmdao.main.assembly import Assembly, set_as_top

    parser = ArgumentParser()
    parser.add_argument('-m', '--module', action='store', dest='module',
                        metavar='MODULE',
                        help='name of module that contains the class to be instantiated and graphed')
    parser.add_argument('-c', '--class', action='store', dest='klass',
                        help='specify class in module to plot graphs for')
    parser.add_argument('-f', '--fmt', action='store', dest='fmt', default='pdf',
                        help='specify output format')
    parser.add_argument('-r', '--recurse', action='store_true', dest='recurse',
                        help='if set, recurse down and plot all dependency, component,  and derivative graphs')
    parser.add_argument('-p', '--pseudos', action='store_false', dest='pseudos',
                        help='if set, include pseudo components in graphs')
    parser.add_argument('-w', '--workflow', action='store_true', dest='workflow',
                        help='if set, group graph components into workflows')


    options = parser.parse_args()

    if options.module is None:
        parser.print_help()
        sys.exit(-1)

    __import__(options.module)

    mod = sys.modules[options.module]

    if options.klass:
        obj = getattr(mod, options.klass)()
    else:
        def isasm(obj):
            try:
                return issubclass(obj, Assembly) and obj is not Assembly
            except:
                return False

        klasses = inspect.getmembers(mod, isasm)
        if len(klasses) > 1:
            print "found %d Assembly classes. you must specify 1" % len(klasses)
            for i, (cname, klass) in enumerate(klasses):
                print "%d) %s" % (i, cname)
            var = raw_input("\nEnter a number: ")
            obj = klasses[int(var)][1]()
            sys.exit(-1)
        elif klasses:
            obj = klasses[0][1]()
        else:
            print "No classes found"

    set_as_top(obj)
    if not obj.get_pathname():
        obj.name = 'top'

    plot_graphs(obj, recurse=options.recurse, fmt=options.fmt, pseudos=options.pseudos,
                workflow=options.workflow)
Example #25
class CmdLineTool:
    
    def __init__(self, description):
        self.description = description
        self._initializeOptParser()

    def _initializeOptParser(self):
        self.argParser = ArgumentParser(description = self.description)
    
    def run(self):
        """
        Run the tool. Call this function after all additional
        arguments have been provided
        """
        self._parseCommandLine()
        self._runImpl()

    # @Override
    def _runImpl(self):
        pass

    def _parseCommandLine(self):
        self.args = self.argParser.parse_args()

    def _usage(self):
        self.argParser.print_help()
Example #26
def main():
    global rx, ry, rz
    global torus
    test = False
    aHelp = "Dump the environment"
    des = 'Command Line Options'
    #parse command line options

    #this will require python 2.7 and above
    from argparse import ArgumentParser
    parser = ArgumentParser(description = des)
    parser.add_argument('--test-env', '-a', dest='test', action = 'store_true', help=aHelp)

    options = parser.parse_args()
    
    if options.test:
        test = True
    else:
        parser.print_help()
    if test:
        pass

    setup()
    torus = Torus(1, 0.3, 4, 4)
    rx = ry = rz = 0

    pyglet.app.run()
Example #27
def parse_args(args=None):
    parser = ArgumentParser(description="Pair swiss weekend tournament")
    parser.add_argument("-v", "--virtual", help="Virtual game weight",
            type=float, default=0.5)
    parser.add_argument("-l", "--prelives",
            help="Number of lives before being paired",
            type=int, default=0)
    parser.add_argument("--ranks", help="Print player rankings",
            action="store_true")
    parser.add_argument("--show-arbitrary",
            help="Indicate arbitrary color assignments",
            action="store_true")
    parser.add_argument("--seed_file", "--seeds",
            help="aaaa style player seeds")
    parser.add_argument("--history_file", "--games",
            help="aaaa style tournament history")
    parser.add_argument("tournament_state", help="Tournament state file",
            nargs="?")
    args = parser.parse_args(args)
    if args.seed_file and args.tournament_state:
        print "Cannot use both regular tournament state file and aaaa style"
        parser.print_help()
        sys.exit(1)
    if not args.seed_file and not args.tournament_state:
        print "Must give tournament state"
        parser.print_help()
        sys.exit(1)
    return args
Example #28
def parse_args(args=None):
    parser = ArgumentParser(description="Pair FTE tournament")
    parser.add_argument("-v", "--virtual", help="Virtual game weight",
            type=float, default=0.5)
    parser.add_argument("-l", "--lives", help="Number of lives",
            type=int, default=3)
    parser.add_argument("--utpr", help="Use UTPR as in WC2013 pairing rules",
            action="store_true")
    parser.add_argument("--wc2015", help="Use wc2015 pairing weights",
            action="store_true")
    parser.add_argument("--all-games",
            help="Use all games, including those by eliminated players",
            action="store_true")
    parser.add_argument("--ranks", help="Print player rankings",
            action="store_true")
    parser.add_argument("--show-arbitrary",
            help="Indicate arbitrary color assignments",
            action="store_true")
    parser.add_argument("--seed_file", "--seeds", help="aaaa style player seeds")
    parser.add_argument("--game_file", "--games",
            help="aaaa style tournament history")
    parser.add_argument("tournament_state", help="Tournament state file",
            nargs="?")
    args = parser.parse_args(args)
    if args.seed_file and args.tournament_state:
        print "Cannot use both regular tournament state file and aaaa style"
        parser.print_help()
        sys.exit(1)
    if not args.seed_file and not args.tournament_state:
        print "Must give tournament state"
        parser.print_help()
        sys.exit(1)
    return args
Example #29
def parse_command_line_args():
    """Parse command line arguments"""

    # Create top parser
    parser = ArgumentParser(prog="rk", description=argparse["_parser"],
                            add_help=True)
    parser.add_argument("-v", "--version", action="version",
                        version="rk 0.3b1")
    # Create subparsers for the top parser
    subparsers = parser.add_subparsers(title=argparse["_subparsers"])
    # Create the parser for the "list" subcommand
    parser_list = subparsers.add_parser("list",
            description=argparse["_parser_list"],
            help=argparse["_parser_list"])
    parser_list.set_defaults(function_name=show_kernels_list)
    # Create the parser for the "install" subcommand
    parser_install = subparsers.add_parser("install",
            description=argparse["_parser_install"],
            help=argparse["_parser_install"])
    parser_install.add_argument("kernel_names", action="store", nargs='+',
                                metavar="KERNEL_NAME")
    parser_install.set_defaults(function_name=install_kernel)
    # Create the parser for the "install-template" subcommand
    parser_install_template = subparsers.add_parser("install-template",
            description=argparse["_parser_install_template"],
            help=argparse["_parser_install_template"])
    parser_install_template.set_defaults(function_name=install_kernel,
                                         kernel_names=None)
    # Create the parser for the "install-all" subcommand
    parser_install_all = subparsers.add_parser("install-all",
            description=argparse["_parser_install_all"],
            help=argparse["_parser_install_all"])
    parser_install_all.set_defaults(function_name=install_all)
    # Create the parser for the "uninstall" subcommand
    parser_uninstall= subparsers.add_parser("uninstall",
            description=argparse["_parser_uninstall"],
            help=argparse["_parser_uninstall"])
    parser_uninstall.add_argument("kernel_names", action="store", nargs='+',
                                  metavar="KERNEL_NAME")
    parser_uninstall.set_defaults(function_name=uninstall_kernel)
    # Create the parser for the "uninstall-template" subcommand
    parser_uninstall_template = subparsers.add_parser("uninstall-template",
            description=argparse["_parser_uninstall_template"],
            help=argparse["_parser_uninstall_template"])
    parser_uninstall_template.set_defaults(function_name=uninstall_kernel,
                                           kernel_names=None)
    # Create the parser for the "uninstall-all" subcommand
    parser_uninstall_all = subparsers.add_parser("uninstall-all",
            description=argparse["_parser_uninstall_all"],
            help=argparse["_parser_uninstall_all"])
    parser_uninstall_all.set_defaults(function_name=uninstall_all)
    # Create the parser for the "ssh" subcommand
    parser_list = subparsers.add_parser("ssh",
            description=argparse["_parser_ssh"],
            help=argparse["_parser_ssh"])
    parser_list.set_defaults(function_name=setup_ssh_auto_login)
    if len(argv) == 1:
        parser.print_help()
        exit(0) # Clean exit without any errors/problems
    return parser.parse_args()
Example #30
def _parse_args():
  """Parses command line arguments."""
  import sys
  from argparse import ArgumentParser

  arg_parser = ArgumentParser(description="Compile or run pxp files")
  subparsers = arg_parser.add_subparsers()

  compile_parser = subparsers.add_parser("compile", description="Compile a pxp program")
  compile_parser.add_argument("in_filename",
                              nargs="?",
                              help="The file to compile. If not given you will be prompted to "
                                   "enter the program on the command line.")
  compile_parser.add_argument("out_filename",
                              help="The file to which to output the byte code. If the file has an "
                                   "extension other than .pxpc, that extension will be added to "
                                   "the filename that is given before output.")
  compile_parser.set_defaults(func=_compile)

  execute_parser = subparsers.add_parser("exec", description="Execute a pxp program")
  execute_parser.add_argument("filename",
                              nargs="?",
                              help="The file to execute. This can be a .pxp file, which will be "
                                   "compiled and executed, or a .pxpc file which will be executed "
                                   "directly. If not given you will be prompted to enter the "
                                   "program on the command line")
  execute_parser.set_defaults(func=_exec)

  args = arg_parser.parse_args()

  if hasattr(args, "func"):
    return args
  else:
    arg_parser.print_help()
    sys.exit(1)
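The hasattr(args, "func") fallback above exists because subcommands are optional by default on Python 3. A minimal sketch of an alternative, assuming Python 3.7+ where add_subparsers() accepts required=True (illustrative only):

from argparse import ArgumentParser

def _parse_args_sketch():
    arg_parser = ArgumentParser(description="Compile or run pxp files")
    # With required=True a missing subcommand becomes a normal argparse error.
    subparsers = arg_parser.add_subparsers(dest="command", required=True)
    compile_parser = subparsers.add_parser("compile", description="Compile a pxp program")
    compile_parser.add_argument("in_filename", nargs="?")
    return arg_parser.parse_args()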
Example #31
def parse_args(argvs):
    # print(argvs)
    # =========================================================================
    # === set up arguments
    # =========================================================================
    parser = ArgumentParser(description='Interface for IMaRS ETL operations')

    # === arguments for the main command
    parser.add_argument(
        "-v", "--verbose",
        help="increase output verbosity",
        action="count",
        default=0
    )
    parser.add_argument(
        "-q", "--quiet",
        help="output only results",
        action="store_true"
    )
    parser.add_argument(
        "-V", "--version",
        help="print version & exit",
        action="store_true"
    )
    # other examples:
    # parser.add_argument("source", help="directory to copy from")
    # parser.add_argument('-l', '--log',
    #     help="desired filepath of log file",
    #     default="/var/opt/imars-etl/backup.log"
    # )
    # parser.add_argument('--rclonelog',
    #     help="desired path of rclone log file",
    #     default=None
    # )

    # =========================================================================
    # === subcommands
    # =========================================================================
    subparsers = parser.add_subparsers(
        title='subcommands',
        description='usage: `imars-etl $subcommand` ',
        help='additional help for subcommands: `imars-etl $subcommand -h`'
    )

    # === sub-cmd arguments shared between multiple subcommands:
    SQL = {  # "sql"
        "help": (
            "' AND'-separated list of metadata key-value pairs using "
            "SQL `WHERE _____` style syntax."
            "\nExample: \n\t"
            "\"product_id=3 AND area_id=7 AND date_time='2018-01-02T03:45'\""
        )
    }
    FIRST = {  # "--first"
        "help": "return first result if multiple rather than exiting w/ error",
        "action": "store_true"
    }
    NAME_ARGS = [
        "-n",
        "--product_type_name", "--name", "--short_name"
    ]
    NAME_KWARGS = {
            "help": "product type id short_name"
    }
    PID_ARGS = [
            "-p", "--product_id", "--pid",
    ]
    PID_KWARGS = {
        "help": "product type id (pid)",
        "type": int
    }

    # === extract
    parser_extract = subparsers.add_parser(
        'extract',
        help='download file from data warehouse'
    )
    parser_extract.set_defaults(func=extract, **EXTRACT_DEFAULTS)
    parser_extract.add_argument("sql", **SQL)
    parser_extract.add_argument(
        "-o", "--output_path",
        help="where to save the requested file. " +
        "If excluded cwd and filename from db is used."
    )
    parser_extract.add_argument("--first", **FIRST)
    parser_extract.add_argument(
        "-m", "--method",
        help=(
            "Extraction method to use. "
            "IE should we create a local copy? a link? a compressed file? "
            "'copy' (default) creates local copy."
            "'link' creates symlinks to files rather than copies; "
            " NOTE: This only works for locally-networked resources - "
            " IE mounted NFS shares (currently all data on servers)."
        ),
        choices=[
            EXTRACT_METHOD.COPY[0],
            EXTRACT_METHOD.LINK[0]
        ],
        default=EXTRACT_METHOD.COPY[0]
    )

    # === select
    parser_select = subparsers.add_parser(
        'select',
        help="prints json-formatted metadata for first entry in given args.sql"
    )
    parser_select.set_defaults(func=select)
    parser_select.add_argument("sql", **SQL)
    parser_select.add_argument(
        "-c", "--cols",
        help=(
            "comma-separated list of columns to select from metadatabase."
            "eg: 'filepath,date_time'"
        ),
        default="*"
    )
    parser_select.add_argument(
        "-f", "--format",
        help=(
            "Output formatter."
            "eg: '-f unix'"
        ),
        action=ConstMapAction,
        options_map_dict=SELECT_OUTPUT_FORMATTERS,
        default=SELECT_OUTPUT_FORMATTERS['unix'],
    )
    parser_select.add_argument(
        "-p", "--post_where",
        help=(
            "Additional argument clauses to follow the \"WHERE\" clause."
            "eg: 'ORDER BY last_processed DESC LIMIT 1'"
        ),
        default=""
    )

    # === id_lookup
    parser_id_lookup = subparsers.add_parser(
        'id_lookup',
        help="translates between numeric id numbers & short names"
    )
    parser_id_lookup.set_defaults(func=id_lookup)

    parser_id_lookup.add_argument(
        "table",
        help="name of the table we use (eg: area, product, status)"
    )
    parser_id_lookup.add_argument(
        "value",
        help="id # or short_name to translate."
    )

    # === find
    parser_find = subparsers.add_parser(
        'find',
        help='list files in dir matching given data'
    )
    parser_find.set_defaults(func=find)
    parser_find.add_argument(*NAME_ARGS, **NAME_KWARGS)
    parser_find.add_argument(*PID_ARGS, **PID_KWARGS)
    parser_find.add_argument(
        "directory",
        help="path to directory of files to be searched",
    )

    # === load
    parser_load = subparsers.add_parser(
        'load',
        help='upload file to data warehouse'
    )
    parser_load.set_defaults(func=load, **LOAD_DEFAULTS)
    # required args
    parser_load.add_argument(
        "filepath",
        help="path to file to upload",
    )
    # optional args
    parser_load.add_argument(*NAME_ARGS, **NAME_KWARGS)
    parser_load.add_argument(*PID_ARGS, **PID_KWARGS)
    parser_load.add_argument(
        "-t", "--time",
        help="ISO8601-formatted date-time string of product"
    )
    parser_load.add_argument(
        "-i", "--ingest_key",
        help="explicitly identifies what ingest format to expect"
    )
    parser_load.add_argument(
        "-j", "--json",
        help="string of json with given file's metadata."
    )
    parser_load.add_argument("-s", "--sql", **SQL)
    parser_load.add_argument(
        "-l", "--load_format",
        help="python strptime-enabled format string describing input basename."
    )
    parser_load.add_argument(
        "-m", "--metadata_file",
        help=(
            "File containing metadata for the file being loaded."
            " This argument can use template variables. "
            " Template variables are pulled from the arguments passed in."
            " Example: `--metadata_file /metadir/{basename}.xml` to specify "
            " That the metadata file has the same name as the data file"
            " (without file extension), but is in the `/metadir/` directory."
            "Other template vars: \n"
            " {basename} {filename} {filepath} {time} {date_time} "
            "{date_time.year} {ext}"
        )
    )
    parser_load.add_argument(  # todo change terminology to "parser"
        "--metadata_file_driver",
        help="driver to use to parse the file given by `metadata_file`",
        action=ConstMapAction,
        options_map_dict=METADATA_DRIVER_KEYS
    )
    parser_load.add_argument(
        "--dry_run",
        help="test run only, does not actually insert into database",
        action="store_true"
    )
    parser_load.add_argument(
        "--duplicates_ok",
        help="do not raise error if trying to load file already in database",
        action="store_true"
    )
    parser_load.add_argument(
        "--nohash",
        help="do not compute hash of the file. WARN: may disable features",
        action="store_true"
    )
    parser_load.add_argument(
        "--noparse",
        help="do not parse filename for metadata. WARN: may disable features",
        action="store_true"
    )
    parser_load.add_argument(
        "--no_load",
        help="do not load file into object store.",
        action="store_true"
    )
    # ===
    args = parser.parse_args(argvs)
    try:
        args.func
    except AttributeError:
        try:
            args.version
        except AttributeError:
            SEP = "\n-------------------------------------------------------\n"
            print(SEP)
            parser.print_help()
            print(SEP)
            raise ValueError(
                "\n\n\tSubcommand is required. See help above."
            )
    # =========================================================================
    return args
Example #32
def get_marketplace():
    marketplace_list = []
    ct = Controller()
    marketplaces = ct.get_marketplace()
    if marketplaces:
        marketplace_list = [mp['mp_name'].lower() for mp in marketplaces]
    return marketplace_list


def test_parser(mp_name=None):
    try:
        ct = Controller(mp_name=mp_name)
        ses, html = ct.get_sessions()
        print(ses, html)
    except Exception as e:
        print(str(e))


if __name__ == '__main__':
    marketplace_choices = get_marketplace()

    parser = ArgumentParser(description="")
    parser.add_argument('-mp', '--marketplace', choices=marketplace_choices,
                        help='Marketplace')

    args = parser.parse_args()
    _mp = args.marketplace

    if _mp:
        test_parser(_mp)
    else:
        parser.print_help()
Example #33
def execute(*params):
    parser = ArgumentParser("Boutiques local executor", add_help=False)
    parser.add_argument("mode",
                        action="store",
                        help="Mode of operation to use. Launch: takes a "
                        "set of inputs compliant with invocation schema "
                        "and launches the tool. Simulate: shows sample "
                        "command-lines based on the provided descriptor"
                        " based on provided or randomly generated inputs. "
                        "Prepare: pulls the Docker or Singularity container "
                        "image for a given descriptor. ",
                        choices=["launch", "simulate", "prepare"])
    parser.add_argument("--help",
                        "-h",
                        action="store_true",
                        help="show this help message and exit")

    helps = any([True for ht in ["--help", "-h"] if ht in params])
    if len(params) <= 1 and helps:
        parser.print_help()
        raise SystemExit

    args, params = parser.parse_known_args(params)
    mode = args.mode
    params += ["--help"] if args.help is True else []

    if mode == "launch":
        parser = ArgumentParser("Launches an invocation.")
        parser.add_argument(
            "descriptor",
            action="store",
            help="The Boutiques descriptor as a JSON file, "
            "JSON string or Zenodo ID (prefixed by 'zenodo.').")
        parser.add_argument("invocation",
                            action="store",
                            help="Input JSON complying to invocation.")
        parser.add_argument("-v",
                            "--volumes",
                            action="append",
                            type=str,
                            help="Volumes to mount when launching the "
                            "container. Format consistently the following:"
                            " /a:/b will mount local directory /a to "
                            "container directory /b.")
        parser.add_argument("-x",
                            "--debug",
                            action="store_true",
                            help="Keeps temporary scripts used during "
                            "execution, and prints additional debug "
                            "messages.")
        parser.add_argument("-u",
                            "--user",
                            action="store_true",
                            help="Runs the container as local user ({0})"
                            " instead of root.".format(os.getenv("USER")))
        parser.add_argument("-s",
                            "--stream",
                            action="store_true",
                            help="Streams stdout and stderr in real time "
                            "during execution.")
        parser.add_argument("--imagepath",
                            action="store",
                            help="Path to Singularity image. "
                            "If not specified, will use current directory.")
        parser.add_argument(
            "-w",
            "--workdir",
            action="store",
            help="Directory to mount as your workdir. This will hold tmp files."
            " Your current directory is the default ")
        results = parser.parse_args(params)
        descriptor = results.descriptor
        inp = results.invocation

        # Validate invocation and descriptor
        valid = invocation(descriptor, '-i', inp)

        # Generate object that will perform the commands
        from boutiques.localExec import LocalExecutor
        executor = LocalExecutor(
            descriptor, inp, {
                "forcePathType": True,
                "debug": results.debug,
                "changeUser": results.user,
                "stream": results.stream,
                "imagePath": results.imagepath,
                "workDir": results.workdir
            })
        # Execute it
        return executor.execute(results.volumes)

    if mode == "simulate":
        parser = ArgumentParser("Simulates an invocation.")
        parser.add_argument(
            "descriptor",
            action="store",
            help="The Boutiques descriptor as a JSON file, "
            "JSON string or Zenodo ID (prefixed by 'zenodo.').")
        parser.add_argument("-i",
                            "--input",
                            action="store",
                            help="Input JSON complying to invocation.")
        parser.add_argument("-j",
                            "--json",
                            action="store_true",
                            help="Flag to generate invocation in JSON format.")
        results = parser.parse_args(params)
        descriptor = results.descriptor

        # Do some basic input scrubbing
        inp = results.input

        valid = invocation(descriptor, '-i', inp) if inp else\
            invocation(descriptor)

        # Generate object that will perform the commands
        from boutiques.localExec import LocalExecutor
        executor = LocalExecutor(descriptor, inp, {
            "forcePathType": True,
            "destroyTempScripts": True,
            "changeUser": True
        })
        if not inp:
            executor.generateRandomParams(1)

        if results.json:
            sout = [json.dumps(executor.in_dict, indent=4, sort_keys=True)]
            print(sout[0])
        else:
            executor.printCmdLine()
            sout = executor.cmd_line

        # for consistency with execute
        # Adding hide to "container location" field since it's an invalid
        # value, can parse that to hide the summary print
        return ExecutorOutput(os.linesep.join(sout), "", 0, "", [], [],
                              os.linesep.join(sout), "", "hide")

    if mode == "prepare":
        parser = ArgumentParser("Pulls the container image for a given "
                                "descriptor")
        parser.add_argument(
            "descriptor",
            action="store",
            help="The Boutiques descriptor as a JSON file, "
            "JSON string or Zenodo ID (prefixed by 'zenodo.').")
        parser.add_argument("-x",
                            "--debug",
                            action="store_true",
                            help="Keeps temporary scripts used during "
                            "execution, and prints additional debug "
                            "messages.")
        parser.add_argument("-s",
                            "--stream",
                            action="store_true",
                            help="Streams stdout and stderr in real time "
                            "during execution.")
        parser.add_argument("--imagepath",
                            action="store",
                            help="Path to Singularity image. "
                            "If not specified, will use current directory.")
        results = parser.parse_args(params)
        descriptor = results.descriptor

        # Validate descriptor
        valid = invocation(descriptor)

        # Generate object that will perform the commands
        from boutiques.localExec import LocalExecutor
        executor = LocalExecutor(
            descriptor, None, {
                "forcePathType": True,
                "debug": results.debug,
                "stream": results.stream,
                "imagePath": results.imagepath
            })
        container_location = executor.prepare()[1]
        print("Container location: " + container_location)

        # Adding hide to "container location" field since it's an invalid
        # value, and we can parse that to hide the summary print
        return ExecutorOutput(container_location, "", 0, "", [], [], "", "",
                              "hide")
Example #34
def bosh(args=None):
    parser = ArgumentParser(description="Driver for Bosh functions",
                            add_help=False)
    parser.add_argument(
        "function",
        action="store",
        nargs="?",
        help="The tool within boutiques/bosh you wish to run. "
        "Create: creates an Boutiques descriptor from scratch. "
        "Validate: validates an existing boutiques descriptor. "
        "Exec: launches or simulates an execution given a "
        "descriptor and a set of inputs. Import: creates a "
        "descriptor for a BIDS app or updates a descriptor "
        "from an older version of the schema. Export: exports a"
        "descriptor to other formats. Publish: creates"
        "an entry in Zenodo for the descriptor and "
        "adds the DOI created by Zenodo to the descriptor. "
        "Invocation: generates the invocation schema for a "
        "given descriptor. Evaluate: given an invocation and a "
        "descriptor, queries execution properties. "
        "Test: run pytest on a descriptor detailing tests. "
        "Example: Generates example command-line for descriptor"
        ". Search: search Zenodo for descriptors. "
        "Pull: download a descriptor from Zenodo. "
        "Pprint: generate pretty help text from a descriptor."
        "Version: prints the version of this tool.",
        choices=[
            "create", "validate", "exec", "import", "export", "publish",
            "invocation", "evaluate", "test", "example", "search", "pull",
            "pprint", "version"
        ])

    parser.add_argument("--help",
                        "-h",
                        action="store_true",
                        help="show this help message and exit")

    args, params = parser.parse_known_args(args)
    func = args.function
    params += ["--help"] if args.help is True else []

    # Returns True if bosh was called from the CLI
    def runs_as_cli():
        return os.path.basename(sys.argv[0]) == "bosh"

    def bosh_return(val, code=0, hide=False, formatted=None):
        if runs_as_cli():
            if hide:
                return code
            if val is not None:
                if formatted is not None:
                    print(formatted)
                else:
                    print(val)
            else:
                if code == 0:
                    print("OK")
                else:
                    print("Failed")
            return code  # everything went well
        return val  # calling function wants this value

    try:
        if func == "create":
            out = create(*params)
            return bosh_return(out, hide=True)
        elif func == "validate":
            out = validate(*params)
            return bosh_return(out)
        elif func == "exec":
            out = execute(*params)
            # If executed through CLI, print 'out' and return exit_code
            # Otherwise, return out
            return bosh_return(out,
                               out.exit_code,
                               hide=bool(out.container_location == 'hide'))
        elif func == "example":
            out = execute('simulate', '-j', *params)
            return bosh_return(out,
                               out.exit_code,
                               hide=bool(out.container_location == 'hide'))
        elif func == "import":
            out = importer(*params)
            return bosh_return(out)
        elif func == "export":
            out = exporter(*params)
            return bosh_return(out)
        elif func == "publish":
            out = publish(*params)
            return bosh_return(out)
        elif func == "invocation":
            out = invocation(*params)
            return bosh_return(out)
        elif func == "evaluate":
            out = evaluate(*params)
            return bosh_return(out)
        elif func == "test":
            out = test(*params)
            return bosh_return(out)
        elif func == "pprint":
            out = prettyprint(*params)
            return bosh_return(out)
        elif func == "search":
            out = search(*params)
            return bosh_return(out,
                               formatted=tabulate(out,
                                                  headers='keys',
                                                  tablefmt='plain'))
        elif func == "pull":
            out = pull(*params)
            return bosh_return(out, hide=True)
        elif func == "version":
            from boutiques.__version__ import VERSION
            return bosh_return(VERSION)
        else:
            parser.print_help()
            raise SystemExit

    except (ZenodoError, DescriptorValidationError, InvocationValidationError,
            ValidationError, ExportError, ImportError, ExecutorError) as e:
        # We don't want to raise an exception when function is called
        # from CLI.
        if runs_as_cli():
            try:
                print(e.message)  # Python 2 only
            except AttributeError:  # Python 3 exceptions have no .message
                print(e)
            return 99  # Note: this conflicts with tool error codes.
        raise e
Example #35
0
def main():
    parser = ArgumentParser(add_help=False)
    parser.add_argument('-c',
                        '--config_path',
                        type=str,
                        default='./src/configs/CIFAR10/ContraGAN.json')
    parser.add_argument('--checkpoint_folder', type=str, default=None)
    parser.add_argument('-current',
                        '--load_current',
                        action='store_true',
                        help='whether to load the current rather than the best checkpoint')
    parser.add_argument('--log_output_path', type=str, default=None)

    parser.add_argument('-DDP',
                        '--distributed_data_parallel',
                        action='store_true')
    parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
    parser.add_argument('-nr',
                        '--nr',
                        default=0,
                        type=int,
                        help='ranking within the nodes')

    parser.add_argument('--seed',
                        type=int,
                        default=-1,
                        help='seed for generating random numbers')
    parser.add_argument('--num_workers', type=int, default=8, help='')
    parser.add_argument('-sync_bn',
                        '--synchronized_bn',
                        action='store_true',
                        help='whether to turn on synchronized batchnorm')
    parser.add_argument('-mpc',
                        '--mixed_precision',
                        action='store_true',
                        help='whether to turn on mixed precision training')
    parser.add_argument('-LARS',
                        '--LARS_optimizer',
                        action='store_true',
                        help='whether to turn on the LARS optimizer')
    parser.add_argument('-rm_API',
                        '--disable_debugging_API',
                        action='store_true',
                        help='whether to disable pytorch autograd debugging mode')

    parser.add_argument('--reduce_train_dataset',
                        type=float,
                        default=1.0,
                        help='fraction of the training dataset to use')
    parser.add_argument('-stat_otf',
                        '--bn_stat_OnTheFly',
                        action='store_true',
                        help='when evaluating, use the statistics of a batch')
    parser.add_argument('-std_stat',
                        '--standing_statistics',
                        action='store_true')
    parser.add_argument('--standing_step',
                        type=int,
                        default=-1,
                        help='# of steps for accumulating batchnorm statistics')
    parser.add_argument('--freeze_layers',
                        type=int,
                        default=-1,
                        help='# of discriminator layers to freeze')

    parser.add_argument('-l', '--load_all_data_in_memory', action='store_true')
    parser.add_argument('-t', '--train', action='store_true')
    parser.add_argument('-e', '--eval', action='store_true')
    parser.add_argument('-s', '--save_images', action='store_true')
    parser.add_argument('-iv',
                        '--image_visualization',
                        action='store_true',
                        help='whether to conduct image visualization')
    parser.add_argument(
        '-knn',
        '--k_nearest_neighbor',
        action='store_true',
        help='whether to conduct k-nearest neighbor analysis')
    parser.add_argument('-itp',
                        '--interpolation',
                        action='store_true',
                        help='whether to conduct interpolation analysis')
    parser.add_argument('-fa',
                        '--frequency_analysis',
                        action='store_true',
                        help='whether to conduct frequency analysis')
    parser.add_argument('-tsne',
                        '--tsne_analysis',
                        action='store_true',
                        help='whether to conduct tsne analysis')
    parser.add_argument('--nrow',
                        type=int,
                        default=10,
                        help='number of rows to plot image canvas')
    parser.add_argument('--ncol',
                        type=int,
                        default=8,
                        help='number of cols to plot image canvas')

    parser.add_argument('--print_every',
                        type=int,
                        default=100,
                        help='control log interval')
    parser.add_argument('--save_every',
                        type=int,
                        default=2000,
                        help='control evaluation and save interval')
    parser.add_argument('--eval_type',
                        type=str,
                        default='test',
                        help='[train/valid/test]')
    args = parser.parse_args()

    if not args.train and \
            not args.eval and \
            not args.save_images and \
            not args.image_visualization and \
            not args.k_nearest_neighbor and \
            not args.interpolation and \
            not args.frequency_analysis and \
            not args.tsne_analysis:
        parser.print_help(sys.stderr)
        sys.exit(1)

    if args.config_path is not None:
        with open(args.config_path) as f:
            model_config = json.load(f)
        train_config = vars(args)
    else:
        raise NotImplementedError

    if model_config['data_processing']['dataset_name'] == 'cifar10':
        assert train_config['eval_type'] in [
            'train', 'test'
        ], "Cifar10 does not contain dataset for validation."
    elif model_config['data_processing']['dataset_name'] in [
            'imagenet', 'tiny_imagenet', 'custom'
    ]:
        assert train_config['eval_type'] == 'train' or train_config['eval_type'] == 'valid', \
            "StudioGAN does not support the evaluation protocol that uses the test dataset on imagenet, tiny imagenet, and custom datasets"

    if train_config['distributed_data_parallel']:
        msg = "StudioGAN does not support image visualization, k_nearest_neighbor, interpolation, frequency, and tsne analysis with DDP. " +\
            "Please change DDP with a single GPU training or DataParallel instead."
        assert train_config['image_visualization'] + train_config['k_nearest_neighbor'] + train_config['interpolation'] +\
            train_config['frequency_analysis'] + train_config['tsne_analysis'] == 0, msg

    if model_config['train']['model']['conditional_strategy'] in [
            "NT_Xent_GAN", "Proxy_NCA_GAN", "ContraGAN"
    ]:
        assert not train_config[
            'distributed_data_parallel'], "StudioGAN does not support DDP training for NT_Xent_GAN, Proxy_NCA_GAN, and ContraGAN"

    hdf5_path_train = make_hdf5(model_config['data_processing'], train_config, mode="train") \
        if train_config['load_all_data_in_memory'] else None

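    # seed == -1 keeps training non-deterministic but lets cudnn benchmark pick
    # the fastest kernels; a fixed seed trades that speed for reproducibility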
    if train_config['seed'] == -1:
        cudnn.benchmark, cudnn.deterministic = True, False
    else:
        fix_all_seed(train_config['seed'])
        cudnn.benchmark, cudnn.deterministic = False, True

    gpus_per_node, rank = torch.cuda.device_count(), torch.cuda.current_device()
    world_size = gpus_per_node * train_config['nodes']

    if world_size == 1:
        warnings.warn(
            'You have chosen a specific GPU. This will completely disable data parallelism.'
        )

    if train_config['disable_debugging_API']:
        torch.autograd.set_detect_anomaly(False)
    check_flag_0(model_config['train']['optimization']['batch_size'],
                 world_size, train_config['freeze_layers'],
                 train_config['checkpoint_folder'],
                 model_config['train']['model']['architecture'],
                 model_config['data_processing']['img_size'])

    run_name = make_run_name(
        RUN_NAME_FORMAT,
        framework=train_config['config_path'].split('/')[-1][:-5],
        phase='train')

    if train_config['distributed_data_parallel'] and world_size > 1:
        print("Train the models through DistributedDataParallel (DDP) mode.")
        mp.spawn(prepare_train_eval,
                 nprocs=gpus_per_node,
                 args=(gpus_per_node, world_size, run_name, train_config,
                       model_config, hdf5_path_train))
    else:
        prepare_train_eval(rank,
                           gpus_per_node,
                           world_size,
                           run_name,
                           train_config,
                           model_config,
                           hdf5_path_train=hdf5_path_train)
Example #36
0
def main(args=None):
	# -----------------------------------------------------------------------------------
	# parsers
	# -----------------------------------------------------------------------------------
	# head
	# -----------------------------------------------------------------------------
	head = ArgumentParser(prog='toolsar')
	sub  = head.add_subparsers(title='sub-command')
	
	# -----------------------------------------------------------------------------
	# check uuid
	# -----------------------------------------------------------------------------
	parser = sub.add_parser('check-uuids')
	# arguments
	parser.add_argument('path', type=str, help='search path')
	# function
	parser.set_defaults(func=check_uuids)
	
	# -----------------------------------------------------------------------------
	# parse sar
	# -----------------------------------------------------------------------------
	parser = sub.add_parser('parse')
	# arguments
	parser.add_argument('path', type=str, help='search path')
	# function
	parser.set_defaults(func=sar_parse)
	
	# -----------------------------------------------------------------------------
	# filter sar
	# -----------------------------------------------------------------------------
	parser = sub.add_parser('filter')
	# arguments
	parser.add_argument(
		'path', type=str, help='search path')
	parser.add_argument(
		'--target', type=str, help='target key', nargs='+', default='.*')
	parser.add_argument(
		'--filter', type=str, help='filter key', nargs='+', default='')
	# function
	parser.set_defaults(func=sar_filter)
	
	# -----------------------------------------------------------------------------
	# trace sar
	# -----------------------------------------------------------------------------
	parser = sub.add_parser('trace')
	# arguments
	parser.add_argument('path',
	 	type   =str, 
		help   ='search path')
	parser.add_argument('--target', '-t', 
		type   =str, 
		help   ='target key', 
		default='.*')
	# function
	parser.set_defaults(func=sar_trace)

	# -----------------------------------------------------------------------------
	# view sar
	# -----------------------------------------------------------------------------
	parser = sub.add_parser('view')
	# arguments
	parser.add_argument(
		'path', type=str, help='search path')
	parser.add_argument(
		'--target', type=str, help='target key', nargs='+', default='.*')
	parser.add_argument(
		'--filter', type=str, help='filter key', nargs='+', default='')
	# function
	parser.set_defaults(func=sar_view)

	# -----------------------------------------------------------------------------------
	# execute
	# -----------------------------------------------------------------------------------
	# logger configuration
	config_logger(
		format = '%(log_color)s%(asctime)s %(levelname)-8s %(reset)s| %(message)s',
		stream = stdout,
		level  = INFO,
		datefmt='%H:%M:%S')
	# call method
	opt = head.parse_args(args=args)
	try:
		opt.func(opt)
	except AttributeError:
		head.print_help()
	except:
		Log.exception('main')
Example #37
0
def main():
    """Zegami command line interface."""
    version = pkg_resources.require('zegami-cli')[0].version
    description = dedent(r'''
         ____                      _
        /_  / ___ ___ ____ ___ _  (_)
         / /_/ -_) _ `/ _ `/  ' \/ /
        /___/\__/\_, /\_,_/_/_/_/_/
                /___/  v{}

        Visual data exploration.

    A command line interface for managing Zegami.
    '''.format(version))

    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description=description,
    )

    # top level arguments
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {}'.format(version),
    )

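    # map each CLI action to its help text and the resource handlers it supports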
    option_mapper = {
        'delete': {
            'help': 'Delete a resource',
            'resources': {
                'collections': collections.delete,
                'dataset': datasets.delete,
                'imageset': imagesets.delete,
            }
        },
        'create': {
            'help': 'Create a resource',
            'resources': {
                'collections': collections.create,
            }
        },
        'get': {
            'help': 'Get a resource',
            'resources': {
                'collections': collections.get,
                'dataset': datasets.get,
                'imageset': imagesets.get,
            }
        },
        'publish': {
            'help': 'Publish a resource',
            'resources': {
                'collection': collections.publish,
            }
        },
        'update': {
            'help': 'Update a resource',
            'resources': {
                'collections': collections.update,
                'dataset': datasets.update,
                'imageset': imagesets.update,
            }
        },
    }

    # option mapper parser
    subparsers = parser.add_subparsers()
    for action in option_mapper:
        action_parser = subparsers.add_parser(
            action,
            help=option_mapper[action]['help'],
        )
        # set the action type so we can work out what was chosen
        action_parser.set_defaults(action=action)
        action_parser.add_argument(
            'resource',
            choices=option_mapper[action]['resources'].keys(),
            help='The name of the resource type.'
        )
        if action != "create":
            action_parser.add_argument(
                'id',
                default=None,
                nargs="?",
                help='Resource identifier.',
            )
        action_parser.add_argument(
            '-c',
            '--config',
            help='Path to command configuration yaml.',
        )
        action_parser.add_argument(
            '-p',
            '--project',
            help='The id of the project.',
        )
        _add_standard_args(action_parser)

    # login parser
    login_parser = subparsers.add_parser(
        'login',
        help='Authenticate against the API and store a long lived token',
    )
    login_parser.set_defaults(action='login')
    _add_standard_args(login_parser)

    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    logger = log.Logger(args.verbose)
    token = auth.get_token(args)
    session = http.make_session(args.url, token)

    if args.action == 'login':
        auth.login(
            logger,
            session,
            args,
        )
        return

    try:
        option_mapper[args.action]['resources'][args.resource](
            logger,
            session,
            args,
        )
    except Exception as e:
        # unhandled exceptions
        if args.verbose:
            raise e
        logger.error('Unhandled exception: {}'.format(e))
        sys.exit(1)
Example #38
0
    p.add_argument("-l",
                   "--label",
                   dest="label",
                   default=None,
                   help="Input Labels (comma separated)")
    p.add_argument("-p",
                   "--pub",
                   dest="pub",
                   help="Input Published Date String (ex. 2050-01-01)")
    p.add_argument("-d",
                   "--draft",
                   dest="draft",
                   default=None,
                   action="store_true",
                   help="Input Status flag")
    p.add_argument('--version', action='version', version=__version__)

    args = p.parse_args()

    if len(argv) == 1:
        p.print_help()
        exit()

    if args.file is None:
        print('Input HTML file ([-i|--in] <HTML file>)')
        exit()

    Upgger(args).start()

# end of Upgger script
Example #39
0
if __name__ == '__main__':
    from argparse import ArgumentParser
    from DuplicatesDeletion import duplicates_gui

    PARSER = ArgumentParser(description='Finds duplicate files.')
    PARSER.add_argument('-gui', action='store_true',
                        help='Display graphical user interface.')
    PARSER.add_argument('-root', metavar='<path>', default='', help='Dir to search.')
    PARSER.add_argument('-remove', action='store_true',
                        help='Delete duplicate files.')
    ARGS = PARSER.parse_args()

    if ARGS.gui:
        app = duplicates_gui()
        app.setroot(ARGS.root)
        app.master.title("DuplicatesDeletion")
        app.mainloop()
    else:
        if ARGS.root == '':
            PARSER.print_help()
        else:
            DUPS = find_duplicates(ARGS.root)
            print('%d Duplicate files found.' % len(DUPS))
            for f in sorted(DUPS):
                if ARGS.remove:
                    remove(f)
                    print('\tDeleted ' + f)
                else:
                    print('\t' + f)
Example #40
0
File: cli.py Project: annibond/qpc
class CLI():
    """Defines the CLI class.

    Class responsible for displaying usage or matching inputs
    to the valid set of commands supported by qpc.
    """

    def __init__(self, name='cli', usage=None, shortdesc=None,
                 description=None):
        """Create main command line handler."""
        self.shortdesc = shortdesc
        if shortdesc is not None and description is None:
            description = shortdesc
        self.parser = ArgumentParser(usage=usage, description=description)
        self.parser.add_argument('--version', action='version',
                                 version=VERSION)
        self.parser.add_argument('-v', dest='verbosity', action='count',
                                 default=0, help=_(messages.VERBOSITY_HELP))
        self.subparsers = self.parser.add_subparsers(dest='subcommand')
        self.name = name
        self.args = None
        self.subcommands = {}
        self._add_subcommand(server.SUBCOMMAND,
                             [ConfigureHostCommand, LoginHostCommand,
                              LogoutHostCommand, ServerStatusCommand])
        self._add_subcommand(cred.SUBCOMMAND,
                             [CredAddCommand, CredListCommand,
                              CredEditCommand, CredShowCommand,
                              CredClearCommand])
        self._add_subcommand(source.SUBCOMMAND,
                             [SourceAddCommand, SourceListCommand,
                              SourceShowCommand, SourceClearCommand,
                              SourceEditCommand])

        self._add_subcommand(scan.SUBCOMMAND,
                             [ScanAddCommand, ScanStartCommand,
                              ScanListCommand, ScanShowCommand,
                              ScanPauseCommand, ScanCancelCommand,
                              ScanRestartCommand, ScanEditCommand,
                              ScanClearCommand, ScanJobCommand])
        self._add_subcommand(report.SUBCOMMAND,
                             [ReportDeploymentsCommand,
                              ReportDetailsCommand,
                              ReportInsightsCommand,
                              ReportDownloadCommand,
                              ReportMergeCommand,
                              ReportMergeStatusCommand])
        self._add_subcommand(insights.SUBCOMMAND,
                             [InsightsUploadCommand])
        ensure_data_dir_exists()
        ensure_config_dir_exists()

    def _add_subcommand(self, subcommand, actions):
        subcommand_parser = self.subparsers.add_parser(subcommand)
        action_subparsers = subcommand_parser.add_subparsers(dest='action')
        self.subcommands[subcommand] = {}
        for action in actions:
            action_inst = action(action_subparsers)
            action_dic = self.subcommands[action.SUBCOMMAND]
            action_dic[action.ACTION] = action_inst

    def main(self):
        """Execute of subcommand operation.

        Method determine whether to display usage or pass input
        to find the best command match. If no match is found the
        usage is displayed
        """
        self.args = self.parser.parse_args()
        setup_logging(self.args.verbosity)
        is_server_cmd = self.args.subcommand == server.SUBCOMMAND
        is_server_login = is_server_cmd and self.args.action == server.LOGIN
        is_server_logout = is_server_cmd and self.args.action == server.LOGOUT

        if not is_server_cmd or is_server_login or is_server_logout:
            # Before attempting to run command, check server location
            server_location = get_server_location()
            if server_location is None or server_location == '':
                log.error(_(messages.SERVER_CONFIG_REQUIRED % PKG_NAME))
                sys.exit(1)

        if ((not is_server_cmd or is_server_logout) and
                not read_client_token()):
            log.error(_(messages.SERVER_LOGIN_REQUIRED % PKG_NAME))
            sys.exit(1)

        if self.args.subcommand in self.subcommands:
            subcommand = self.subcommands[self.args.subcommand]
            if self.args.action in subcommand:
                action = subcommand[self.args.action]
                action.main(self.args)
            else:
                self.parser.print_help()
        else:
            self.parser.print_help()
Example #41
0
def main():
    stats_aggregate = {"record_count": 0, "field_info": {}}
    element_stats_aggregate = {}

    parser = ArgumentParser(usage='%(prog)s [options] data_filename.xml')
    parser.add_argument("-e",
                        "--element",
                        dest="element",
                        help="print element to screen")
    parser.add_argument("-i",
                        "--id",
                        action="store_true",
                        dest="id",
                        default=False,
                        help="prepend meta_id to line")
    parser.add_argument("-s",
                        "--stats",
                        action="store_true",
                        dest="stats",
                        default=False,
                        help="only print stats for repository")
    parser.add_argument("-p",
                        "--present",
                        action="store_true",
                        dest="present",
                        default=False,
                        help="print if there is value of element in record")
    parser.add_argument("datafile", help="the datafile you want analyzed ")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help()
        exit()

    if args.element is None:
        args.stats = True

    s = n = 0
    with open(args.datafile) as data:
        for item in ijson.items(data, "docs.item"):
            record = Record(item, args)
            record_id = record.get_record_id()

            if args.stats is False and args.present is False:
                if record.get_elements() is not None:
                    for i in record.get_elements():
                        if args.id:
                            print("\t".join([record_id, i]))
                        else:
                            print(i)

            if args.stats is False and args.present is True:
                print("%s %s" % (record_id, record.has_element()))

            if args.stats is True and args.element is None:
                if (s % 1000) == 0 and s != 0:
                    print("%d records processed" % s)
                    if args.stats is True and args.element is None:
                        stats_averages = create_stats_averages(stats_aggregate)
                        pretty_print_stats(stats_averages)
                s += 1
                collect_stats(stats_aggregate, record.get_stats())
            n += 1

    if args.stats is True and args.element is None:
        stats_averages = create_stats_averages(stats_aggregate)
        pretty_print_stats(stats_averages)
Example #42
0
def main():
    def print_version():
        click.secho(
            f'version {__version__}, a downloader that downloads HLS/DASH streams.'
        )

    parser = ArgumentParser(
        prog='XstreamDL-CLI',
        usage='XstreamDL-CLI [OPTION]... URL/FILE/FOLDER...',
        description='A downloader that downloads HLS/DASH streams',
        add_help=False,
    )
    parser.add_argument('-v',
                        '--version',
                        action='store_true',
                        help='Print version and exit')
    parser.add_argument('-h',
                        '--help',
                        action='store_true',
                        help='Print help message and exit')
    parser.add_argument('-name',
                        '--name',
                        default='',
                        help='Specific stream base name')
    parser.add_argument('-base',
                        '--base-url',
                        default='',
                        help='Set base url for Stream')
    parser.add_argument('-save-dir',
                        '--save-dir',
                        default='Downloads',
                        help='Set save dir for Stream')
    parser.add_argument('--ffmpeg',
                        default='ffmpeg',
                        help='Set executable ffmpeg path')
    parser.add_argument('--mp4decrypt',
                        default='mp4decrypt',
                        help='Set executable mp4decrypt path')
    parser.add_argument(
        '--select',
        action='store_true',
        help='Show stream to select and download, default is to download all')
    parser.add_argument(
        '--disable-force-close',
        action='store_true',
        help=
        'By default all connections are closed securely, but this makes the download speed slower'
    )
    parser.add_argument(
        '--limit-per-host',
        default=4,
        help=
        'Increase the value if your connection to the stream host is poor, suggest >100 for DASH stream'
    )
    parser.add_argument('--user-agent',
                        default='',
                        help='set user-agent headers for request')
    parser.add_argument('--referer',
                        default='',
                        help='set custom referer for request')
    parser.add_argument(
        '--headers',
        default='',
        help=
        'set custom headers for request, separator is |, e.g. "header1:value1|header2:value2"'
    )
    parser.add_argument('--overwrite',
                        action='store_true',
                        help='Overwrite output files')
    parser.add_argument('--raw-concat',
                        action='store_true',
                        help='Concat content as raw')
    parser.add_argument('--disable-auto-concat',
                        action='store_true',
                        help='Disable auto-concat')
    parser.add_argument('--enable-auto-delete',
                        action='store_true',
                        help='Enable auto-delete files after concat success')
    parser.add_argument(
        '--key',
        default=None,
        help=
        '<id>:<k>, <id> is either a track ID in decimal or a 128-bit KID in hex, <k> is a 128-bit key in hex'
    )
    parser.add_argument(
        '--b64key',
        default=None,
        help=
        'base64 format aes key, only for HLS standard AES-128-CBC encryption')
    parser.add_argument('--hexiv', default=None, help='hex format aes iv')
    parser.add_argument('--proxy',
                        default=None,
                        help='use http proxy, e.g. http://127.0.0.1:1080')
    parser.add_argument('--split',
                        action='store_true',
                        help='Dash option, split one stream to multi sections')
    parser.add_argument('--repl', action='store_true', help='REPL mode')
    parser.add_argument('URI', nargs='*', help='URL/FILE/FOLDER string')
    args = parser.parse_args()
    command_handler(args)
    if args.help:
        print_version()
        parser.print_help()
        sys.exit()
    if args.version:
        print_version()
        sys.exit()
    if len(args.URI) == 0:
        try:
            uri = input(
                'Paste your URL/FILE/FOLDER string at the end of the command, please.\nCtrl+C to exit or input here:'
            )
        except KeyboardInterrupt:
            sys.exit()
        if uri.strip() != '':
            args.URI.append(uri.strip())
    if len(args.URI) == 0:
        sys.exit('No URL/FILE/FOLDER input')
    downloader = Downloader(args)
    downloader.daemon()
Example #43
0
def parse_args():
    parser = ArgumentParser(description='MakeConvDataset')

    parser.add_argument('-xDir',
                        '--xDir',
                        type=str,
                        required=True,
                        help='Audio directory.')

    parser.add_argument('-outDir',
                        '--outDir',
                        type=str,
                        required=True,
                        help='Where to store plots and predictions')

    parser.add_argument('-targetDir',
                        '--targetDir',
                        type=str,
                        default=None,
                        help='True values directory.')

    parser.add_argument('-gpu',
                        '--gpu',
                        type=int,
                        default=0,
                        help='GPU used for computation ')

    parser.add_argument('-inputWidth',
                        '--inputWidth',
                        type=int,
                        default=798,
                        help='Width of spectrogram input ')

    parser.add_argument('-inputHeight',
                        '--inputHeight',
                        type=int,
                        default=15,
                        help='Length of audio signal to use in seconds ')

    parser.add_argument(
        '-outputSize',
        '--outputSize',
        type=int,
        default=6,
        help='number of output bands, 6 or 4 (4 for joint estimation)')

    parser.add_argument('-param',
                        '--param',
                        type=str,
                        default='t60',
                        help='Parameter to predict among t60,c50,c80,drr,all ')

    parser.add_argument('-model',
                        '--model',
                        type=str,
                        default='CRNN2D_largefilters',
                        help='Network to use for inference ')

    parser.add_argument('-weights',
                        '--weights',
                        type=str,
                        required=True,
                        help='Load model weights')

    args = parser.parse_args()
    #check args
    if args.param not in ['t60', 'c50', 'c80', 'drr', 'all']:
        parser.print_help()
        sys.exit(1)

    return args
Example #44
0
    parser = ArgumentParser(description=descText, add_help=True)
    parser.add_argument("-i",
                        dest="in_zip",
                        default='',
                        required=True,
                        help="Path to WRF Hydro routing grids zip file.")
    parser.add_argument("-o",
                        dest="out_folder",
                        default='',
                        required=True,
                        help="Path to output folder.")

    # If no arguments are supplied, print help message
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    all_defaults = {key: parser.get_default(key) for key in vars(args)}

    if args.in_zip == all_defaults["in_zip"]:
        print('Using input zip location of: {0}'.format(
            all_defaults["in_zip"]))

    if args.out_folder == all_defaults["out_folder"]:
        print('Using output location of: {0}'.format(
            all_defaults["out_folder"]))

    # Create output directory for temporary outputs
    if os.path.exists(args.out_folder):
        print(
Example #45
0
def parse_args():
    parser = ArgumentParser(description='Account Cleanup script')
    intake = parser.add_mutually_exclusive_group(required=True)
    regions = parser.add_mutually_exclusive_group(required=False)
    intake.add_argument(
        '-ou',
        '--organizational-unit-path',
        dest='ou_path',
        help=
        'Organizational Unit ID for which child accounts will be cleaned. By default non-recursively.',
        type=str)
    intake.add_argument('-a',
                        '--account-id',
                        dest='account_id',
                        help='Specific account ID to clean',
                        type=str)
    parser.add_argument('-s',
                        '--stack-name-pattern',
                        dest='stack_name_pattern',
                        help='Stack name pattern (regex) to remove',
                        type=str)
    parser.add_argument(
        '-or',
        '--recursive-ou-lookup',
        dest='recursive_ou_lookup',
        help=
        'Use recursion to select accounts from child OUs (infinite levels) too, only works with "-ou"',
        action='store_true')
    parser.add_argument(
        '-rm',
        '--remove',
        dest='remove',
        help=
        'Remove the found stacks (by default only lists them). WARNING: This is very permanent!',
        action='store_true')
    parser.add_argument(
        '-rmf',
        '--remove-failed',
        dest='remove_failed',
        help='Remove failed stacks. WARNING: This is very permanent!',
        action='store_true')
    parser.add_argument(
        '-rdv',
        '--remove-dc-vifs',
        dest='remove_dc_vifs',
        help=
        'Remove Direct Connect Virtual Interfaces (before CFN operations), so there is no dependency when deleting Virtual Gateways. WARNING: This is very permanent!',
        action='store_true')
    parser.add_argument(
        '-s3',
        '--s3-bucket-pattern',
        dest='s3_bucket_pattern',
        help=
        'List or remove S3 buckets including contents with a given prefix (before CFN operations)',
        type=str)
    parser.add_argument(
        '-rs3',
        '--remove-s3',
        dest='remove_s3',
        help=
        'Remove the found S3 Buckets (by default only lists them). WARNING: This is very permanent!',
        action='store_true')
    parser.add_argument(
        '-so',
        '--show-ou-tree',
        dest='show_ou_tree',
        help='Output OU tree for every account (in the header lines)',
        action='store_true')
    regions.add_argument(
        '-ar',
        '--all-regions',
        dest='all_regions',
        help=
        'Go through all regions, instead of only the CBSP supported regions',
        action='store_true')
    regions.add_argument('-nsr',
                         '--non-supported-regions',
                         dest='non_supported_regions',
                         help='List/remove stacks in non-supported regions',
                         action='store_true')
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        help='Output some extra debug information',
                        action='store_true')

    if len(sys.argv) == 1:
        parser.print_help()
        exit(2)

    return parser.parse_args()
Example #46
0
def main():
    args = ArgumentParser(
        prog='make_training_sets.py',
        description=
        'Automates making new or extending current PhiSpy\'s training sets.',
        epilog=
        'Example usage:\npython3 scripts/make_training_sets.py -d tests -o data -g tests/groups.txt --retrain --phmms pVOGs.hmm --color --threads 4',
        formatter_class=RawDescriptionHelpFormatter)

    args.add_argument('-i',
                      '--infile',
                      type=str,
                      help='Path to input GenBank file.')

    args.add_argument(
        '-d',
        '--indir',
        type=str,
        help='Path to input directory with multiple GenBank files.')

    args.add_argument(
        '-g',
        '--groups',
        type=str,
        help=
        'Path to file with path to input file and its group name in two tab-delimited columns. Otherwise each file will have its own training set.'
    )

    args.add_argument(
        '-o',
        '--outdir',
        type=str,
        help=
        'Path to output directory. For each kmer creation approach a subdirectory will be created.',
        required=True)

    args.add_argument(
        '-k',
        '--kmer_size',
        type=int,
        help=
        'The size of required kmers. For the codon approach use a multiple of 3. [Default: 12]',
        default=12)

    args.add_argument(
        '-t',
        '--type',
        type=str,
        help=
        'Approach for creating kmers. Options are: simple (just slicing the sequence from the first position), all (all possible kmers), codon (all possible kmers made with a step of 3 nts to get kmers corresponding to translated aas). [Default: all]',
        default='all')

    args.add_argument(
        '--phmms',
        type=str,
        help=
        'Phage HMM profile database (like pVOGs) will be mapped against the genome of interest and used as additional feature to identify prophages.'
    )

    args.add_argument(
        '--color',
        action='store_true',
        help=
        'If set, within the output GenBank file CDSs with phmms hits will be colored (for viewing in Artemis).'
    )

    args.add_argument(
        '--threads',
        type=str,
        help='Number of threads to use while searching with phmms.',
        default='4')

    args.add_argument(
        '--skip_search',
        action='store_true',
        help=
        'If set, the search part will be skipped and the program will assume the existence of updated GenBank files.'
    )

    args.add_argument(
        '--retrain',
        action='store_true',
        help=
        'If set, retrains the original training sets; otherwise it extends what it finds in the output directory.'
    )

    if len(argv[1:]) == 0:
        args.print_help()
        args.exit()

    try:
        args = args.parse_args()
    except:
        args.exit()

    if not args.infile and not args.indir:
        print('You have to provide input data by either --infile or --indir.')
        exit(1)

    # Create output directory
    if not path.isdir(args.outdir): makedirs(args.outdir)

    # groups of resulting training sets
    if args.groups:
        groups = read_groups(args.groups, args.indir)
        infiles = set()
        for i in groups.values():
            infiles.update(set(i))
        infiles = sorted(list(infiles))
        print('Working on %i input files based on group file.' % len(infiles))
    else:
        if args.indir:
            infiles = glob(path.join(args.indir, r'*.gb'))
            infiles += glob(path.join(args.indir, r'*.gb[kf]'))
            infiles += glob(path.join(args.indir, r'*.gbff'))
            infiles = sorted(infiles)
        else:
            infiles = [args.infile]
        groups = {'group%05d' % (i + 1): [f] for i, f in enumerate(infiles)}

    # create kmers for all input files and group them into host and phage sets
    print('Making kmers from input file(s).')
    if not path.isdir(args.outdir): makedirs(args.outdir)

    host_kmers = set()
    phage_kmers = set()
    for i, infile in enumerate(infiles):
        print('  Processing %i/%i: %s' %
              (i + 1, len(infiles), path.basename(infile)))
        ref_orfs_list, target_orf_list = read_genbank(infile)

        for i in ref_orfs_list:
            host_kmers.update(kmerize_orf(i, args.kmer_size, args.type))
        for i in target_orf_list:
            phage_kmers.update(kmerize_orf(i, args.kmer_size, args.type))

    # write unique phage kmers
    print('Writing phage_kmers_' + args.type + '_wohost.txt.')
    kmers_file = path.join(args.outdir,
                           'phage_kmers_' + args.type + '_wohost.txt')

    if path.isfile(kmers_file):
        if not args.retrain:
            print('  Reading %s.' % kmers_file)
            prev_kmers = read_kmers(kmers_file)
            phage_kmers.update(prev_kmers)
    else:
        print('  %s is missing - just making a new one.' % kmers_file)

    with open(kmers_file, 'w') as outf:
        outf.write('\n'.join(phage_kmers - host_kmers))

    # make all training groups
    print('Making trainSets for each input file.')
    # the following check will be removed after removing the requirement in PhiSpy's helper_functions.py
    if not path.isfile(
            path.join(path.dirname(path.dirname(path.realpath(__file__))),
                      'PhiSpyModules/data/trainSet_genericAll.txt')):
        with open(
                path.join(path.dirname(path.dirname(path.realpath(__file__))),
                          'PhiSpyModules/data/trainSet_genericAll.txt'),
                'w') as outf:
            outf.write('')
    trainsets_outdir = path.join(args.outdir, 'trainSets')
    if not path.isdir(trainsets_outdir): makedirs(trainsets_outdir)
    phispy = path.join(path.dirname(path.dirname(path.realpath(__file__))),
                       'PhiSpy.py')
    for infile in infiles:
        print('  Processing %s' % infile)
        cmd = [
            'python3', phispy, infile, '-o', trainsets_outdir, '-m',
            path.basename(infile) + '.trainSet'
        ]
        if args.phmms: cmd.extend(['--phmms', args.phmms, '-t', args.threads])
        if args.color: cmd.append('--color')
        if args.skip_search: cmd.append('--skip_search')
        # print(f'Calling: {" ".join(cmd)}')
        call(cmd)

    # create trainingGenome_list.txt
    print('Writing trainingGenome_list.txt.')
    tg_file = path.join(args.outdir, 'trainingGenome_list.txt')
    if args.retrain or not path.isfile(tg_file):
        with open(tg_file, 'w') as outf:
            outf.write('0\ttestSet_genericAll.txt\tGeneric Test Set\t%i\n' %
                       len(infiles))
            gcnt = 1
            for g, i in groups.items():
                outf.write('%i\ttrainSet_%s.txt\t%s\t%i\n' %
                           (gcnt, g, ';'.join([path.basename(x)
                                               for x in i]), len(i)))
                gcnt += 1
    else:
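        # trainingGenome_list.txt already exists and we are not retraining:
        # read it and extend it with entries for any new groups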
        with open(tg_file) as inf:
            train_sets = {}
            inf.readline()
            for line in inf:
                line = line.strip().split('\t')
                line[0] = int(line[0])
                line[-1] = int(line[-1])
                train_sets[line[1].rsplit('.', 1)[0]] = line

        for t, l in train_sets.items():
            t = t.split('_')[1]
            if t in groups and l[3] == len(groups[t]):
                groups.pop(t)
            elif t in groups:
                infiles = [path.basename(i) for i in groups.pop(t)]
                # update the existing entry in place instead of clobbering the dict
                train_sets[l[1].rsplit('.', 1)[0]] = [l[0], l[1], ';'.join(infiles), len(infiles)]

        gcnt = len(train_sets) + 1
        if len(groups) > 0:
            for g, infiles in groups.items():
                infiles = [path.basename(i) for i in infiles]
                train_sets[g] = [
                    gcnt,
                    'trainSet_%s.txt' % g, ';'.join(infiles),
                    len(infiles)
                ]
                gcnt += 1

        gsize = 0
        for t, l in train_sets.items():
            if l[3] == 1:
                gsize += 1

        with open(tg_file, 'w') as outf:
            outf.write('0\ttestSet_genericAll.txt\tGeneric Test Set\t%i\n' %
                       (gsize))
            for t in sorted(train_sets.values()):
                outf.write('\t'.join([str(x) for x in t]) + '\n')

    # make or extend genericAll.txt
    print('Making training sets.')
    for g, i in groups.items():
        with open(path.join(args.outdir, 'trainSet_%s.txt' % g), 'w') as outf:
            first = True
            for infile in i:
                trainset = path.join(trainsets_outdir,
                                     path.basename(infile) + '.trainSet')
                with open(trainset) as inf:
                    if not first: inf.readline()
                    outf.write(inf.read())
                    first = False

    if args.retrain:
        with open(path.join(args.outdir, 'trainSet_genericAll.txt'),
                  'w') as outf:
            first = True
            for infile in infiles:
                trainset = path.join(trainsets_outdir,
                                     path.basename(infile) + '.trainSet')
                with open(trainset) as inf:
                    if not first: inf.readline()
                    outf.write(inf.read())
                    first = False
    else:
        # read all testSets in directory and combine them into generic test set
        print(
            '*WARNING* - for updating generic train set only trainSets from single reference files are considered!'
        )
        with open(path.join(args.outdir, 'trainingGenome_list.txt')) as inf:
            with open(path.join(args.outdir, 'trainSet_genericAll.txt'),
                      'w') as outf:
                first = True
                for line in inf:
                    line = line.split()
                    if line[0] == '0': continue
                    if int(line[-1]) == 1:
                        trainset = path.join(args.outdir,
                                             path.basename(line[1]))
                        with open(trainset) as infts:
                            if not first: infts.readline()
                            outf.write(infts.read())
                            first = False

    print('Done!')
Example #47
0
def main():
    parent_parser = ArgumentParser(add_help=False)
    parent_parser.add_argument("--verbose",
                               "-v",
                               default=LogLevel.ERROR,
                               action="count",
                               help="Verbosity of output")
    root_parser = ArgumentParser()
    subparsers = root_parser.add_subparsers(description="Actions on k8s agent",
                                            dest="subaction")
    DeployAction.add_to_parser(
        subparsers.add_parser(
            "deploy",
            help="Deploy a set of SQL Servers in an Availability Group",
            parents=[parent_parser]))
    FailoverAction.add_to_parser(
        subparsers.add_parser("failover",
                              help="Perform a failover to a target replica.",
                              parents=[parent_parser]))

    args = root_parser.parse_args()
    # namespace defaults to AG name
    if args.namespace is None or args.namespace == "":
        args.namespace = args.ag

    log(LogLevel.INFO, "args:", vars(args))

    if not args.subaction:
        root_parser.print_help()
        exit(1)

    global log_verbosity
    log_verbosity = args.verbose

    if not args.obj.validate_args(args):
        print("action.validate_args failed:", args.obj, args)
        root_parser.print_help()
        exit(1)

    log(LogLevel.INFO, sys.argv[0], "Startup Time:", datetime.now(), "UTC:",
        datetime.utcnow())
    log(LogLevel.INFO)

    # create temp directory
    working_dir = mkdtemp(prefix="kube_agent_{}-".format(args.subaction),
                          suffix=(args.namespace))

    # Run action here
    exitcode, spec_paths = args.obj.run(args, working_dir)

    log(LogLevel.INFO, sys.argv[0], "Completion Time:", datetime.now(), "UTC:",
        datetime.utcnow())
    log(LogLevel.INFO, "----------")
    if spec_paths:
        log(LogLevel.ALL, "Created the following specs:")
        for path in spec_paths:
            log(LogLevel.ALL, "\t", path)
        log(LogLevel.ALL)
        try:
            specs_file = "{}_{}_specs".format(args.subaction, args.namespace)
            log(LogLevel.ALL, "Wrote spec paths:", "'{}'".format(specs_file))
            with open(specs_file, "w") as f:
                f.write("\n".join(spec_paths) + "\n")
        except IOError as ex:
            log(LogLevel.ALL, "Caught IOError:", ex, "writing to",
                "'{}'".format(specs_file))
    exit(exitcode)
Example #48
0
def parseArgs():
    """
    Parse the command line arguments
    """
    # Parser for all cases but one.
    parser = ArgumentParser()
    parser.add_argument('--no-color',
                        help=('disable colored output'),
                        action='store_true',
                        default=False)
    parser.add_argument('-v',
                        '--verbose',
                        help=('Print messages to stderr during execution (for '
                              'debugging); show all repositories, regardless '
                              'of changes; show directories in SYSGIT_PATH '
                              'that are not under version control'),
                        action='store_true',
                        default=False)
    subparsers = parser.add_subparsers(dest='function',
                                       help='help for subcommand')

    # { list }
    listParser = subparsers.add_parser('list',
                                       help=('list the status of the '
                                             'system\'s repositories'),
                                       formatter_class=RawTextHelpFormatter)
    # List arguments
    listParser.add_argument('-s',
                            '--submodules',
                            help=('list the status of the repository\'s '
                                  'submodules, if they\ncontain changes.'),
                            action='store_true',
                            default=False)
    listParser.add_argument('-b',
                            '--bugs',
                            help=('show "B" in the output if the repository '
                                  'contains a file\nnamed "bugs" in the top '
                                  'level directory (blue).'),
                            action='store_true',
                            default=False)

    listParser.add_argument('-p',
                            '--show-stash',
                            help=('show the number of entries in the '
                                  'repository\'s stash\n(yellow)'),
                            action='store_true',
                            default=False)
    listParser.add_argument('-r',
                            '--remotes',
                            help=("check the refs of remote branches against "
                                  "the local refs:\n"
                                  "  * 'uu': local is up to date w/ remote\n"
                                  "  * 'lr': local is behind remote\n"
                                  "  * 'rl': local is ahead of remote\n"
                                  "  * '<>': local and remote have diverged\n"
                                  "  * '  ': local has no remote branch\n"
                                  "  * '00': local has no commits yet"),
                            action='store_true',
                            default=False)
    listParser.add_argument('-a',
                            '--all',
                            help=('Same as -bspr'),
                            action='store_true',
                            default=False)

    # Print help if no arguments were given
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()

    # Parse the arguments
    return parser.parse_args()
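Example #48 relies on RawTextHelpFormatter so that the literal newlines embedded in the -r/--remotes help string survive into the --help output. A minimal sketch of that combination, with illustrative names only:

from argparse import ArgumentParser, RawTextHelpFormatter

parser = ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(dest='function')
# RawTextHelpFormatter keeps the literal line breaks in the help string.
listParser = subparsers.add_parser('list', formatter_class=RawTextHelpFormatter)
listParser.add_argument('-r', '--remotes', action='store_true',
                        help="check remote refs:\n"
                             "  * 'uu': up to date\n"
                             "  * 'lr': behind remote")

args = parser.parse_args(['list', '-r'])
print(args.function, args.remotes)   # list True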
Example #49
0
def main():
    # Set up the base command line parser.
    parser = ArgumentParser()
    parser.add_argument(
        '-i',
        '--redis-instances',
        dest='redis_instances',
        type=str,
        required=True,
        help='comma-delimited list of Redis instances to get stats from '
        '(port, host:port and unix:///path/to/socket formats are allowed)')
    parser.add_argument(
        '-t',
        '--redis-type',
        dest='redis_type',
        type=str,
        required=True,
        choices=SUBJECTS.keys(),
        help='the type of the Redis instance to get stats from')
    subparsers = parser.add_subparsers(dest='command')

    # Set up 'send' command.
    subparser = subparsers.add_parser(
        'send', help='submit stats through Zabbix sender')
    subparser.add_argument(
        '-c',
        '--zabbix-config',
        dest='zabbix_config',
        type=str,
        required=False,
        default=None,
        help='the Zabbix agent configuration file to fetch the configuration '
        'from')
    subparser.add_argument(
        '-z',
        '--zabbix-server',
        dest='zabbix_server',
        type=str,
        required=False,
        default=None,
        help='hostname or IP address of the Zabbix server / Zabbix proxy')
    subparser.add_argument(
        '-p',
        '--zabbix-port',
        dest='zabbix_port',
        type=int,
        required=False,
        default=None,
        help='port number of server trapper running on the Zabbix server / '
        'Zabbix proxy')
    subparser.add_argument(
        '-s',
        '--zabbix-host',
        dest='zabbix_host',
        type=str,
        required=False,
        default=None,
        help='host name as registered in the Zabbix frontend')

    # Set up 'discover' command.
    subparser = subparsers.add_parser('discover',
                                      help='generate Zabbix discovery schema')
    subparser.add_argument('subject',
                           type=str,
                           help='dynamic resources to be discovered')

    # Parse command line arguments.
    options = parser.parse_args()

    # Check required arguments.
    if options.command == 'send':
        if options.zabbix_config is None and options.zabbix_server is None:
            parser.print_help()
            sys.exit(1)

    # Check subject to be discovered.
    if options.command == 'discover':
        subjects = SUBJECTS[options.redis_type].keys()
        if options.subject not in subjects:
            sys.stderr.write(
                'Invalid subject (choose from %(subjects)s)\n' % {
                    'subjects': ', '.join("'{0}'".format(s) for s in subjects),
                })
            sys.exit(1)

    # Execute command.
    globals()[options.command](options)
    sys.exit(0)
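Example #49 looks up the handler for the chosen subcommand via globals()[options.command]. A common alternative, sketched below with hypothetical send/discover handlers, is to attach the handler to each subparser with set_defaults so no name lookup is needed:

from argparse import ArgumentParser

def send(options):
    print('sending to', options.zabbix_server)

def discover(options):
    print('discovering', options.subject)

parser = ArgumentParser()
subparsers = parser.add_subparsers(dest='command')

send_parser = subparsers.add_parser('send')
send_parser.add_argument('-z', '--zabbix-server', default='localhost')
send_parser.set_defaults(func=send)          # bind the handler directly

discover_parser = subparsers.add_parser('discover')
discover_parser.add_argument('subject')
discover_parser.set_defaults(func=discover)

options = parser.parse_args(['discover', 'databases'])
options.func(options)                        # prints: discovering databases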
Example #50
0
def handle_arguments():
    if len(argv) > 1 and is_invalid_command():
        print("lgit: '%s' is not a lgit command. See './lgit.py --help'." %
              (argv[1]))
    else:
        # declare information what command is used
        # and description summary each purpose's command
        parser = ArgumentParser(
            prog='lgit',
            usage='./lgit.py <command> [optional] [<arg>]',
            description="Lgit is a lightweight version of git")
        commands = parser.add_subparsers(title='There are common ',
                                         description='Git commands used',
                                         prog='lgit',
                                         dest='command',
                                         metavar="command")
        init = commands.add_parser('init',
                                   usage='usage: ./lgit.py init [<directory>]',
                                   help='Create an empty Git repository'
                                   ' or reinitialize an existing one')
        init.add_argument('dest',
                          metavar='directory',
                          nargs='?',
                          help="Destination to create directories's lgit")

        add = commands.add_parser('add',
                                  usage='./lgit.py add <file> ...',
                                  help='Add file contents to the index')
        add.add_argument('file',
                         nargs='*',
                         help="file will be added to the index")

        commands.add_parser('status',
                            usage='./lgit status',
                            help='Show the working tree status')

        commit = commands.add_parser('commit',
                                     usage='./lgit.py commit -m <message>',
                                     help='Record changes to the repository')
        commit.add_argument('-m', dest='message', help='commit message')

        remove = commands.add_parser('rm',
                                     usage='./lgit.py rm <file > ...',
                                     help='Remove files from the '
                                     'working tree and from the index')
        remove.add_argument('file', nargs='*', help="file will be removed")

        config = commands.add_parser('config',
                                     usage='./lgit.py config --author <name>',
                                     help='Set lgit configuration')

        config.add_argument('--author', help='store author information')

        commands.add_parser('ls-files',
                            usage='./lgit.py ls-files',
                            help='Show index file at current directory'
                            ' or subdirectory')

        commands.add_parser('log',
                            usage='./lgit.py log',
                            help='Show commit logs')
        branch = commands.add_parser('branch',
                                     usage='./lgit.py branch [<name>]',
                                     help='List or create branches')
        branch.add_argument('name', nargs='?', help="create a new branch")

        checkout = commands.add_parser('checkout',
                                       usage='./lgit.py checkout <branch>',
                                       help='Switch branches or restore '
                                       'working tree files')
        checkout.add_argument('branch', nargs='?', help="branch to switch to")

        merge = commands.add_parser('merge',
                                    usage='./lgit.py merge <branch>',
                                    help='Join two or more development'
                                    ' histories together')
        merge.add_argument('branch',
                           nargs='?',
                           help="join branch into current branch")

        stash = commands.add_parser('stash',
                                    usage='./lgit.py stash [<apply>]',
                                    help='Save the current changes of current '
                                    'working directory')
        stash.add_argument('apply',
                           nargs='?',
                           help='Restore working directory')
        # get arguments from sys.argv
        args = parser.parse_args()

        # check valid argument or not
        # invalid --> show help
        if not args.command:
            parser.print_help()
        else:
            return args, parser
    exit()
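Example #50 prints the help text by hand when no subcommand was given. On Python 3.7+ the subparsers themselves can be marked required, so argparse reports the missing command on its own; a minimal sketch:

from argparse import ArgumentParser

parser = ArgumentParser(prog='lgit')
commands = parser.add_subparsers(dest='command', required=True)
commands.add_parser('status', help='Show the working tree status')
init = commands.add_parser('init', help='Create an empty repository')
init.add_argument('dest', nargs='?', metavar='directory')

args = parser.parse_args(['init', 'myproject'])
print(args.command, args.dest)   # init myproject
# parser.parse_args([]) would now exit with
# "error: the following arguments are required: command"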
Example #51
0
def parse_args():
    """
    Function that parses the args passed on the command line.
    :return:
    """
    logger = logging.getLogger('pbr')
    parser = ArgumentParser(description="""Plan B Recovery, if all else fails go to Plan B! 
                                        Plan B Recovery comes with ABSOLUTELY NO WARRANTY.""")

    parser.add_argument("-c", "--check-facts", help="Check if the existing facts changed.", action='store_true')
    parser.add_argument("-b", "--backup", help="Create rescue media, and full system backup.", action='store_true')
    parser.add_argument("-bo", "--backup-only", help="Create backup archive only.", action='store_true')
    parser.add_argument("-f", "--facts", help="Print all the facts.", action='store_true')
    parser.add_argument("--format", help="Format the specified usb device.", action='store', type=str)
    parser.add_argument("-k", "--keep", help="Keep, don't remove temporary backup directory.", action='store_true')
    parser.add_argument("-m", "--mkrescue", help="Create rescue media only.", action='store_true')
    parser.add_argument("-r", "--recover", help="Recover system from backup.", action='store_true')
    parser.add_argument("-v", "--verbose", help="Add verbosity.", action='store_true')
    parser.add_argument("-ba", "--backup-archive", help="Specify the location of the backup archive to use on restore.",
                        action='store', type=str)
    parser.add_argument("-ro", "--restore-only", help="Restore backup archive only.", action='store_true')

    opts = parser.parse_args()

    if not opts.backup and not opts.recover and not opts.mkrescue and not opts.backup_only and not opts.check_facts:
        if not opts.facts and not opts.format:
            logger.error("Please provide a valid argument.")
            parser.print_help()
            exit(1)
    
    if (opts.backup or opts.backup_only) and opts.recover:
        logger.error("Choose either backup or recover not both.")
        parser.print_help()
        exit(1)

    if (opts.backup or opts.backup_only) and opts.mkrescue:
        logger.error("Choose either backup or mkrescue not both.")
        parser.print_help()
        exit(1)

    if opts.backup_archive and not opts.recover:
        logger.error("-bo/--backup-archive can only be specified when running recover.")
        parser.print_help()
        exit(1)

    if opts.restore_only and not opts.recover:
        logger.error("-ro/--restore-only can only be specified when running recover.")
        parser.print_help()
        exit(1)

    if opts.backup and opts.backup_only:
        logger.error("-bo/--backup-only can't be specified when -b/--backup is specified, and vice versa.")
        parser.print_help()
        exit(1)

    if opts.facts and (opts.backup or opts.backup_only or opts.recover or opts.restore_only):
        logger.error("-f/--facts can't be specified if backup or recover is specified also.")
        parser.print_help()
        exit(1)

    if opts.format and (opts.backup or opts.backup_only or opts.recover or opts.restore_only):
        logger.error("--format can't be specified if backup or recover is specified also.")
        parser.print_help()
        exit(1)

    return opts
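Example #51 enforces its either/or rules (backup vs. recover, -b vs. -bo, and so on) with hand-written checks after parsing. For the simple pairwise cases, argparse can enforce the exclusion itself through a mutually exclusive group; a minimal sketch, not a drop-in replacement for every check above:

from argparse import ArgumentParser

parser = ArgumentParser(description='Plan B Recovery demo')
mode = parser.add_mutually_exclusive_group()
mode.add_argument('-b', '--backup', action='store_true')
mode.add_argument('-bo', '--backup-only', action='store_true')
mode.add_argument('-r', '--recover', action='store_true')

print(parser.parse_args(['-b']))   # Namespace(backup=True, backup_only=False, recover=False)
# parser.parse_args(['-b', '-r']) exits with an error similar to:
#   argument -r/--recover: not allowed with argument -b/--backup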
Example #52
0
def main(argv=None):  # IGNORE:C0111
    '''Command line options.'''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    program_shortdesc = 'cov19extract -- Tool for extraction of online article data about vaccination'
    program_license = '''%s

  Created by Momcilo Majic on %s.
  GNU GENERAL PUBLIC LICENSE Version 3

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        executors = {}

        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        common_arguments_parser = ArgumentParser(add_help=False)
        common_arguments_parser.add_argument(
            '--dry-run',
            action='store_true',
            help=
            'designates if dry run is to be executed. The tool should not change anything. Warning: this feature is still not fully implemented!'
        )
        common_arguments_parser.add_argument(
            "-v",
            "--verbose",
            dest="verbose",
            action="count",
            help="set verbosity level [default: %(default)s]")
        subparsers = parser.add_subparsers(help='sub-command help',
                                           dest="subparser_name")

        parser_process_direct_url = subparsers.add_parser(
            'processDirectURL',
            help='attempts to parse content at directly specified URL.',
            parents=[common_arguments_parser])
        parser_process_direct_url.add_argument(
            '--url',
            required=True,
            help=
            'url to be processed, tool will determine if it is able to parse it.'
        )
        parser_process_direct_url.add_argument(
            '--template',
            default='human',
            required=False,
            help=
            'template generator (default is %(default)s) to be used for output',
            choices=[current.name for current in TemplateFactory.templates])
        executors[parser_process_direct_url.prog.split(' ')
                  [-1]] = process_direct_url

        parser.add_argument('-V',
                            '--version',
                            action='version',
                            version=program_version_message)

        # Process arguments
        args = parser.parse_args()
        if args.subparser_name:
            executor = executors[args.subparser_name]
            # execute command
            return executor(args)
        else:
            parser.print_help()
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
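Example #52 defines --dry-run and -v/--verbose once on a parent parser built with add_help=False and shares them with every subcommand through parents=[...]. A minimal sketch of that sharing pattern:

from argparse import ArgumentParser

common = ArgumentParser(add_help=False)      # no -h here; each child adds its own
common.add_argument('--dry-run', action='store_true')
common.add_argument('-v', '--verbose', action='count', default=0)

parser = ArgumentParser()
subparsers = parser.add_subparsers(dest='subparser_name')
fetch = subparsers.add_parser('processDirectURL', parents=[common])
fetch.add_argument('--url', required=True)

args = parser.parse_args(['processDirectURL', '--url', 'https://example.org', '-vv'])
print(args.dry_run, args.verbose, args.url)  # False 2 https://example.org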
Example #53
0
def main():
    # bootstrap the logger with defaults
    logger.configure()

    if os.name == "posix":
        if os.geteuid() == 0:
            logging.warning(
                "You are running Overviewer as root. "
                "It is recommended that you never do this, "
                "as it is dangerous for your system. If you are running "
                "into permission errors, fix your file/directory "
                "permissions instead. Overviewer does not need access to "
                "critical system resources and therefore does not require "
                "root access.")

    try:
        cpus = multiprocessing.cpu_count()
    except NotImplementedError:
        cpus = 1

    avail_north_dirs = [
        'lower-left', 'upper-left', 'upper-right', 'lower-right', 'auto'
    ]

    # Parse for basic options
    parser = ArgumentParser(usage=helptext)
    parser.add_argument("-c",
                        "--config",
                        dest="config",
                        action="store",
                        help="Specify the config file to use.")
    parser.add_argument(
        "-p",
        "--processes",
        dest="procs",
        action="store",
        type=int,
        help="The number of local worker processes to spawn. Defaults to the "
        "number of CPU cores your computer has.")

    parser.add_argument("--pid",
                        dest="pid",
                        action="store",
                        help="Specify the pid file to use.")
    # Options that only apply to the config-less render usage
    parser.add_argument(
        "--rendermodes",
        dest="rendermodes",
        action="store",
        help="If you're not using a config file, specify which rendermodes to "
        "render with this option. This is a comma-separated list.")
    parser.add_argument("world",
                        nargs='?',
                        help="Path or name of the world you want to render.")
    parser.add_argument("output",
                        nargs='?',
                        help="Output directory for the rendered map.")

    # Useful one-time render modifiers:
    render_modifiers = parser.add_mutually_exclusive_group()
    render_modifiers.add_argument("--forcerender",
                                  dest="forcerender",
                                  action="store_true",
                                  help="Force re-render the entire map.")
    render_modifiers.add_argument(
        "--check-tiles",
        dest="checktiles",
        action="store_true",
        help="Check each tile on disk and re-render old tiles.")
    render_modifiers.add_argument(
        "--no-tile-checks",
        dest="notilechecks",
        action="store_true",
        help="Only render tiles that come from chunks that have changed "
        "since the last render (the default).")

    # Useful one-time debugging options:
    parser.add_argument(
        "--check-terrain",
        dest="check_terrain",
        action="store_true",
        help="Try to locate the texture files. Useful for debugging texture"
        " problems.")
    parser.add_argument("-V",
                        "--version",
                        dest="version",
                        help="Display version information and then exits.",
                        action="store_true")
    parser.add_argument(
        "--check-version",
        dest="checkversion",
        help="Fetch information about the latest version of Overviewer.",
        action="store_true")
    parser.add_argument(
        "--update-web-assets",
        dest='update_web_assets',
        action="store_true",
        help="Update web assets. Will *not* render tiles or update "
        "overviewerConfig.js.")

    # Log level options:
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        action="count",
        default=0,
        help="Print less output. You can specify this option multiple times.")
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="count",
        default=0,
        help="Print more output. You can specify this option multiple times.")
    parser.add_argument(
        "--simple-output",
        dest="simple",
        action="store_true",
        default=False,
        help="Use a simple output format, with no colors or progress bars.")

    # create a group for "plugin exes"
    # (the concept of a plugin exe is only loosely defined at this point)
    exegroup = parser.add_argument_group(
        "Other Scripts", "These scripts may accept different "
        "arguments than the ones listed above.")
    exegroup.add_argument("--genpoi",
                          dest="genpoi",
                          action="store_true",
                          help="Run the genPOI script.")
    exegroup.add_argument("--skip-scan",
                          dest="skipscan",
                          action="store_true",
                          help="When running GenPOI, don't scan for entities.")
    exegroup.add_argument("--skip-players",
                          dest="skipplayers",
                          action="store_true",
                          help="When running GenPOI, don't scan player data.")

    args, unknowns = parser.parse_known_args()

    # Check for possible shell quoting issues
    if len(unknowns) > 0:
        possible_mistakes = []
        for i in xrange(len(unknowns) + 1):
            possible_mistakes.append(" ".join([args.world, args.output] +
                                              unknowns[:i]))
            possible_mistakes.append(" ".join([args.output] + unknowns[:i]))
        for mistake in possible_mistakes:
            if os.path.exists(mistake):
                logging.warning(
                    "Looks like you tried to make me use {0} as an argument, but "
                    "forgot to quote the argument correctly. Try using \"{0}\" "
                    "instead if the spaces are part of the path.".format(
                        mistake))
                parser.error("Too many arguments.")
        parser.error("Too many arguments.")

    # first thing to do is check for stuff in the exegroup:
    if args.genpoi:
        # remove the "--genpoi" option from sys.argv before running genPI
        sys.argv.remove("--genpoi")
        g = __import__("overviewer_core.aux_files", {}, {}, ["genPOI"])
        g.genPOI.main()
        return 0

    # re-configure the logger now that we've processed the command line options
    logger.configure(logging.INFO + 10 * args.quiet - 10 * args.verbose,
                     verbose=args.verbose > 0,
                     simple=args.simple)

    ##########################################################################
    # This section of main() runs in response to any one-time options we have,
    # such as -V for version reporting
    if args.version:
        print("Minecraft Overviewer %s" % util.findGitVersion() +
              " (%s)" % util.findGitHash()[:7])
        try:
            import overviewer_core.overviewer_version as overviewer_version
            print("built on %s" % overviewer_version.BUILD_DATE)
            if args.verbose > 0:
                print("Build machine: %s %s" %
                      (overviewer_version.BUILD_PLATFORM,
                       overviewer_version.BUILD_OS))
                print("Read version information from %r" %
                      overviewer_version.__file__)
        except ImportError:
            print("(build info not found)")
        if args.verbose > 0:
            print("Python executable: %r" % sys.executable)
            print(sys.version)
        if not args.checkversion:
            return 0
    if args.checkversion:
        print("Currently running Minecraft Overviewer %s" %
              util.findGitVersion() + " (%s)" % util.findGitHash()[:7])
        try:
            import urllib
            import json
            latest_ver = json.loads(
                urllib.urlopen(
                    "http://overviewer.org/download.json").read())['src']
            print("Latest version of Minecraft Overviewer %s (%s)" %
                  (latest_ver['version'], latest_ver['commit'][:7]))
            print("See https://overviewer.org/downloads for more information.")
        except Exception:
            print("Failed to fetch latest version info.")
            if args.verbose > 0:
                import traceback
                traceback.print_exc()
            else:
                print("Re-run with --verbose for more details.")
            return 1
        return 0

    if args.pid:
        if os.path.exists(args.pid):
            try:
                with open(args.pid, 'r') as fpid:
                    pid = int(fpid.read())
                    if util.pid_exists(pid):
                        print(
                            "Overviewer is already running (pid exists) - exiting."
                        )
                        return 0
            except (IOError, ValueError):
                pass
        with open(args.pid, "w") as f:
            f.write(str(os.getpid()))
    # if --check-terrain was specified, but we have NO config file, then we cannot
    # operate on a custom texture path.  we do terrain checking with a custom texture
    # pack later on, after we've parsed the config file
    if args.check_terrain and not args.config:
        import hashlib
        from overviewer_core.textures import Textures
        tex = Textures()

        logging.info("Looking for a few common texture files...")
        try:
            f = tex.find_file(
                "assets/minecraft/textures/block/sandstone_top.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/block/grass_block_top.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/block/diamond_ore.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/block/acacia_planks.png",
                verbose=True)
        except IOError:
            logging.error("Could not find any texture files.")
            return 1

        return 0

    # if no arguments are provided, print out a helpful message
    if not (args.world and args.output) and not args.config:
        # first provide an appropriate error for bare-console users
        # that don't provide any options
        if util.is_bare_console():
            print("\n")
            print(
                "The Overviewer is a console program.  Please open a Windows command prompt"
            )
            print(
                "first and run Overviewer from there.   Further documentation is available at"
            )
            print("http://docs.overviewer.org/\n")
            print("\n")
            print(
                "For a quick-start guide on Windows, visit the following URL:\n"
            )
            print(
                "http://docs.overviewer.org/en/latest/win_tut/windowsguide/\n")

        else:
            # more helpful message for users who know what they're doing
            logging.error(
                "You must either specify --config or give me a world directory "
                "and output directory.")
            parser.print_help()
            list_worlds()
        return 1

    ##########################################################################
    # This section does some sanity checking on the command line options passed
    # in. It checks to see if --config was given that no worldname/destdir were
    # given, and vice versa
    if args.config and (args.world and args.output):
        print()
        print(
            "If you specify --config, you need to specify the world to render as well as "
            "the destination in the config file, not on the command line.")
        print("Put something like this in your config file:")
        print("worlds['myworld'] = %r" % args[0])
        print("outputdir = %r" %
              (args[1] if len(args) > 1 else "/path/to/output"))
        print()
        logging.error(
            "You cannot specify both --config AND a world + output directory on the "
            "command line.")
        parser.print_help()
        return 1

    if not args.config and (args.world or
                            args.output) and not (args.world and args.output):
        logging.error(
            "You must specify both the world directory and an output directory"
        )
        parser.print_help()
        return 1

    #########################################################################
    # These two halves of this if statement unify config-file mode and
    # command-line mode.
    mw_parser = configParser.MultiWorldParser()

    if not args.config:
        # No config file mode.
        worldpath, destdir = map(os.path.expanduser, [args.world, args.output])
        logging.debug("Using %r as the world directory", worldpath)
        logging.debug("Using %r as the output directory", destdir)

        mw_parser.set_config_item("worlds", {'world': worldpath})
        mw_parser.set_config_item("outputdir", destdir)

        rendermodes = ['lighting']
        if args.rendermodes:
            rendermodes = args.rendermodes.replace("-", "_").split(",")

        # Now for some good defaults
        renders = util.OrderedDict()
        for rm in rendermodes:
            renders["world-" + rm] = {
                "world": "world",
                "title": "Overviewer Render (%s)" % rm,
                "rendermode": rm,
            }
        mw_parser.set_config_item("renders", renders)

    else:
        if args.rendermodes:
            logging.error(
                "You cannot specify --rendermodes if you give a config file. "
                "Configure your rendermodes in the config file instead.")
            parser.print_help()
            return 1

        # Parse the config file
        try:
            mw_parser.parse(os.path.expanduser(args.config))
        except configParser.MissingConfigException as e:
            # this isn't a "bug", so don't print scary traceback
            logging.error(str(e))
            util.nice_exit(1)

    # Add in the command options here, perhaps overriding values specified in
    # the config
    if args.procs:
        mw_parser.set_config_item("processes", args.procs)

    # Now parse and return the validated config
    try:
        config = mw_parser.get_validated_config()
    except Exception as ex:
        if args.verbose:
            logging.exception(
                "An error was encountered with your configuration. "
                "See the information below.")
        else:  # no need to print scary traceback!
            logging.error("An error was encountered with your configuration.")
            logging.error(str(ex))
        return 1

    if args.check_terrain:  # we are already in the "if configfile" branch
        logging.info("Looking for a few common texture files...")
        for render_name, render in config['renders'].iteritems():
            logging.info("Looking at render %r.", render_name)

            # find or create the textures object
            texopts = util.dict_subset(render, ["texturepath"])

            tex = textures.Textures(**texopts)
            f = tex.find_file(
                "assets/minecraft/textures/block/sandstone_top.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/block/grass_block_top.png",
                verbose=True)
            f = tex.find_file(
                "assets/minecraft/textures/block/diamond_ore.png",
                verbose=True)
            f = tex.find_file("assets/minecraft/textures/block/oak_planks.png",
                              verbose=True)
        return 0

    ############################################################
    # Final validation steps and creation of the destination directory
    logging.info("Welcome to Minecraft Overviewer!")
    logging.debug("Current log level: {0}.".format(logging.getLogger().level))

    def set_renderchecks(checkname, num):
        for name, render in config['renders'].iteritems():
            if render.get('renderchecks', 0) == 3:
                logging.warning(checkname + " ignoring render " + repr(name) +
                                " since it's "
                                "marked as \"don't render\".")
            else:
                render['renderchecks'] = num

    if args.forcerender:
        logging.info("Forcerender mode activated. ALL tiles will be rendered.")
        set_renderchecks("forcerender", 2)
    elif args.checktiles:
        logging.info("Checking all tiles for updates manually.")
        set_renderchecks("checktiles", 1)
    elif args.notilechecks:
        logging.info("Disabling all tile mtime checks. Only rendering tiles "
                     "that need updating since last render.")
        set_renderchecks("notilechecks", 0)

    if not config['renders']:
        logging.error(
            "You must specify at least one render in your config file. Check the "
            "documentation at http://docs.overviewer.org if you're having trouble."
        )
        return 1

    #####################
    # Do a few last minute things to each render dictionary here
    for rname, render in config['renders'].iteritems():
        # Convert render['world'] to the world path, and store the original
        # in render['worldname_orig']
        try:
            worldpath = config['worlds'][render['world']]
        except KeyError:
            logging.error(
                "Render %s's world is '%s', but I could not find a corresponding entry "
                "in the worlds dictionary.", rname, render['world'])
            return 1
        render['worldname_orig'] = render['world']
        render['world'] = worldpath

        # If 'forcerender' is set, change renderchecks to 2
        if render.get('forcerender', False):
            render['renderchecks'] = 2

        # check if overlays are set, if so, make sure that those renders exist
        if render.get('overlay', []) != []:
            for x in render.get('overlay'):
                if x != rname:
                    try:
                        renderLink = config['renders'][x]
                    except KeyError:
                        logging.error(
                            "Render %s's overlay is '%s', but I could not find a "
                            "corresponding entry in the renders dictionary.",
                            rname, x)
                        return 1
                else:
                    logging.error("Render %s's overlay contains itself.",
                                  rname)
                    return 1

    destdir = config['outputdir']
    if not destdir:
        logging.error(
            "You must specify the output directory in your config file.")
        logging.error("e.g. outputdir = '/path/to/outputdir'")
        return 1
    if not os.path.exists(destdir):
        try:
            os.mkdir(destdir)
        except OSError:
            logging.exception("Could not create the output directory.")
            return 1

    ########################################################################
    # Now we start the actual processing, now that all the configuration has
    # been gathered and validated
    # create our asset manager... ASSMAN
    assetMrg = assetmanager.AssetManager(destdir,
                                         config.get('customwebassets', None))

    # If we've been asked to update web assets, do that and then exit
    if args.update_web_assets:
        assetMrg.output_noconfig()
        logging.info("Web assets have been updated.")
        return 0

    # The changelist support.
    changelists = {}
    for render in config['renders'].itervalues():
        if 'changelist' in render:
            path = render['changelist']
            if path not in changelists:
                out = open(path, "w")
                logging.debug("Opening changelist %s (%s).", out, out.fileno())
                changelists[path] = out
            else:
                out = changelists[path]
            render['changelist'] = out.fileno()

    tilesets = []

    # saves us from creating the same World object over and over again
    worldcache = {}
    # same for textures
    texcache = {}

    # Set up the cache objects to use
    caches = []
    caches.append(cache.LRUCache(size=100))
    # TODO: optionally more caching layers here

    renders = config['renders']
    for render_name, render in renders.iteritems():
        logging.debug("Found the following render thing: %r", render)

        # find or create the world object
        try:
            w = worldcache[render['world']]
        except KeyError:
            try:
                w = world.World(render['world'])
            except CorruptNBTError as e:
                logging.error("Failed to open world %r.", render['world'])
                raise e
            except world.UnsupportedVersion as e:
                for ln in str(e).split('\n'):
                    logging.error(ln)
                sys.exit(1)

            worldcache[render['world']] = w

        # find or create the textures object
        texopts = util.dict_subset(
            render, ["texturepath", "bgcolor", "northdirection"])
        texopts_key = tuple(texopts.items())
        if texopts_key not in texcache:
            tex = textures.Textures(**texopts)
            logging.info("Generating textures...")
            tex.generate()
            logging.debug("Finished generating textures.")
            texcache[texopts_key] = tex
        else:
            tex = texcache[texopts_key]

        try:
            logging.debug("Asking for regionset %r." % render['dimension'][1])
            rset = w.get_regionset(render['dimension'][1])
        except IndexError:
            logging.error(
                "Sorry, I can't find anything to render!  Are you sure there are .mca "
                "files in the world directory?")
            return 1
        if rset is None:  # indicates no such dimension was found
            logging.warn(
                "Sorry, you requested dimension '%s' for %s, but I couldn't find it.",
                render['dimension'][0], render_name)
            continue

        #################
        # Apply any regionset transformations here

        # Insert a layer of caching above the real regionset. Any world
        # transformations will pull from this cache, but their results will not
        # be cached by this layer. This uses a common pool of caches; each
        # regionset cache pulls from the same underlying cache object.
        rset = world.CachedRegionSet(rset, caches)

        # If a crop is requested, wrap the regionset here
        if "crop" in render:
            rsets = []
            for zone in render['crop']:
                rsets.append(world.CroppedRegionSet(rset, *zone))
        else:
            rsets = [rset]

        # If this is to be a rotated regionset, wrap it in a RotatedRegionSet
        # object
        if (render['northdirection'] > 0):
            newrsets = []
            for r in rsets:
                r = world.RotatedRegionSet(r, render['northdirection'])
                newrsets.append(r)
            rsets = newrsets

        ###############################
        # Do the final prep and create the TileSet object

        # create our TileSet from this RegionSet
        tileset_dir = os.path.abspath(os.path.join(destdir, render_name))

        # only pass to the TileSet the options it really cares about
        render[
            'name'] = render_name  # perhaps a hack. This is stored here for the asset manager
        tileSetOpts = util.dict_subset(render, [
            "name", "imgformat", "renderchecks", "rerenderprob", "bgcolor",
            "defaultzoom", "imgquality", "imglossless", "optimizeimg",
            "rendermode", "worldname_orig", "title", "dimension", "changelist",
            "showspawn", "overlay", "base", "poititle", "maxzoom",
            "showlocationmarker", "minzoom"
        ])
        tileSetOpts.update({"spawn": w.find_true_spawn()
                            })  # TODO find a better way to do this
        for rset in rsets:
            tset = tileset.TileSet(w, rset, assetMrg, tex, tileSetOpts,
                                   tileset_dir)
            tilesets.append(tset)

    # If none of the requested dimensions exist, tilesets will be empty
    if not tilesets:
        logging.error(
            "There are no tilesets to render! There's nothing to do, so exiting."
        )
        return 1

    # Do tileset preprocessing here, before we start dispatching jobs
    logging.info("Preprocessing...")
    for ts in tilesets:
        ts.do_preprocessing()

    # Output initial static data and configuration
    assetMrg.initialize(tilesets)

    # multiprocessing dispatcher
    if config['processes'] == 1:
        dispatch = dispatcher.Dispatcher()
    else:
        dispatch = dispatcher.MultiprocessingDispatcher(
            local_procs=config['processes'])
    dispatch.render_all(tilesets, config['observer'])
    dispatch.close()

    assetMrg.finalize(tilesets)

    for out in changelists.itervalues():
        logging.debug("Closing %s (%s).", out, out.fileno())
        out.close()

    if config['processes'] == 1:
        logging.debug("Final cache stats:")
        for c in caches:
            logging.debug("\t%s: %s hits, %s misses", c.__class__.__name__,
                          c.hits, c.misses)
    if args.pid:
        os.remove(args.pid)

    logging.info(
        "Your render has been written to '%s', open index.html to view it." %
        destdir)

    return 0
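Example #53 calls parse_known_args() so that leftover tokens, typically an unquoted path containing spaces, can be inspected before the script gives up. A minimal sketch of that behaviour with hypothetical world/output positionals:

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('world', nargs='?')
parser.add_argument('output', nargs='?')

# "My World" was not quoted, so it arrives as two separate tokens.
args, unknowns = parser.parse_known_args(['My', 'World', '/tmp/out'])
print(args.world, args.output)   # My World   (first two tokens)
print(unknowns)                  # ['/tmp/out'] -- leftovers to diagnose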
Example #54
0
def parse_cmd_arguments(args):
    """ Parses the command line arguments

    Arguments:
        args {list} -- argument list from command line

    Returns:
        ArgumentParser.parse_args
    """
    parser = ArgumentParser(description="HPNorton Database Operations")
    parser.add_argument(
        "--all-products",
        help="Show list of all products",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--all-customers",
        help="Show list of all customers",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--all-rentals",
        help="Show list of all rentals",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--available-products",
        help="Show list of available products",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--drop-collections",
        help="Drop customers, product, and rental collections",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--drop-database",
        help="Drop database",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--disable-log",
        help="Disable logging",
        action="store_true",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--rentals-for-customer",
        metavar="USER_ID",
        help="Show rentals for specified user_id",
        action="store",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--customers-renting-product",
        help="Show customers renting the specified product_id",
        metavar="PRODUCT_ID",
        action="store",
        required=False,
        default=False,
    )
    parser.add_argument(
        "--parallel",
        help="Loads csv files from specified data directory",
        metavar=("--parallel", "file1 file2 file3 ..."),
        action="store",
        required=False,
        default=False,
        nargs="*",
    )
    parser.add_argument(
        "--linear",
        help="Loads csv files from specified data directory",
        metavar=("--linear", "file1 file2 file3 ..."),
        action="store",
        required=False,
        default=False,
        nargs="*",
    )
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    return parser.parse_args(args)
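Example #54 receives the argument list as a parameter and passes it to parse_args(args), which keeps the parser testable without patching sys.argv, and it sends the bare-invocation help text to stderr. A minimal sketch of the same shape; unlike the original it checks the passed-in list rather than sys.argv:

import sys
from argparse import ArgumentParser

def parse_cmd_arguments(args):
    parser = ArgumentParser(description="HPNorton Database Operations")
    parser.add_argument("--all-products", action="store_true")
    parser.add_argument("--rentals-for-customer", metavar="USER_ID")
    if not args:                        # nothing to parse: help goes to stderr
        parser.print_help(sys.stderr)
        sys.exit(1)
    return parser.parse_args(args)

# Called from a test, or from main() with sys.argv[1:]:
print(parse_cmd_arguments(["--rentals-for-customer", "42"]))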
Example #55
0
def argypargy():
    parser = ArgumentParser(description='TransloCapture -1 input_read1.fastq -2 input_read2.fastq -o output.csv')
    req_args = parser.add_argument_group('Required arguments')
    add_args = parser.add_argument_group('Additional arguments')
    req_args.add_argument("--input", "-i", help="Input fastq file for SR sequencing")
    req_args.add_argument("--read1", "-1", help="Fastq read 1 from PE sequencing")
    req_args.add_argument("--read2", "-2", help="Fastq read 2 from PE sequencing")
    req_args.add_argument("--output", "-o", help="Output file to write to, format is csv")
    add_args.add_argument("--control", "-c", help="The fastq you want to normalise to (e.g. untreated).\nIf unspecified, will not normalise.")
    add_args.add_argument("--control1", "-c1", help="Read 1 of the fastq you want to normalise to (e.g. untreated).\nIf unspecified, will not normalise.")
    add_args.add_argument("--control2", "-c2", help="Read 2 of the fastq you want to normalise to (e.g. untreated).\nIf unspecified, will not normalise.")
    req_args.add_argument("--primers", "-p", help="A 3 column .csv file of the name, foward primer sequence and reverse primer sequence (reverse complement) for each site to be analysed.")
    add_args.add_argument("--preproc", "-pp", help="If specified, --input (-i) and --control (-c) must be already quantified TransloCapture matrices.\nOutput will be a new matrix that is the differential of input-control.", action='store_true')
    add_args.add_argument("--translocated", "-t", help="Fastq file to write translocated sequences to.\n If unspecified, will not write")
    add_args.add_argument("--translocated1", "-t1", help="Fastq file to write read1 of translocated sequences to.\n If unspecified, will not write.")
    add_args.add_argument("--translocated2", "-t2", help="Fastq file to write read2 of translocated sequences to.\n If unspecified, will not write.")
    add_args.add_argument("--fastqout", "-fq", help="Fastq file to write non-translocated sequences to.\n If unspecified, will not write")
    add_args.add_argument("--fastqout1", "-fq1", help="Fastq file to write read1 of non-translocated sequences to.\n If unspecified, will not write.")
    add_args.add_argument("--fastqout2", "-fq2", help="Fastq file to write read2 of non-translocated sequences to.\n If unspecified, will not write.")
    add_args.add_argument("--sensitivity", "-s", help="Pads window at start of reads for identify the primer used for amplification.\nLarger numbers increase detection, but reduce specificity. default=2, max=10.", default=2)
    add_args.add_argument("--quiet", "-q", help="Removes all messages.", action='store_true')
    args = parser.parse_args()
    if len(sys.argv)==1: # If no arguments are given, print help information.
        parser.print_help()
        sys.exit()
    if args.input is None and args.read1 is None: # Input file is required
        print("\nTransloCapture ERROR: Please provide an input file with --input (-i) or with --read1 and --read2 (-1 -2) for PE seqeuencing.\n")
        sys.exit()
    if args.output is None: # Output file is required
        print("\nTransloCapture ERROR: Please provide an output file with --output (-o).\n")
        sys.exit()
    if args.input is not None and args.read1 is not None: # Don't crossover the SR and PE options
        print("\nTransloCapture ERROR: --input (-i) is for single-read sequencing and --read1 --read2 (-1 -2) are for PE seqeuencing. They cannot be used together.\n")
        sys.exit()
    if args.control is not None and args.control1 is not None: # Don't crossover the SR and PE options, control edition
        print("\nTransloCapture ERROR: --control (-c) is for single-read sequencing and --control1 --control2 (-c1 -c2) are for PE seqeuencing. They cannot be used together.\n")
        sys.exit()                
    if args.read1 is not None and args.read2 is None or args.read1 is None and args.read2 is not None: # Need both reads specified for PE
        print("\nTransloCapture ERROR: If --read1 (-1) or --read2 (-2) are specified you must also supply the other. For single read sequencing use --input (-i) instead.\n")
        sys.exit()
    if args.control1 is not None and args.control2 is None or args.control1 is None and args.control2 is not None: # Need both reads specified for PE, control edition
        print("\nTransloCapture ERROR: If --control1 (-c1) or --control2 (-c2) are specified you must also supply the other. For single read sequencing use --control (-c) instead.\n")
        sys.exit()
    if args.read1 is not None and args.control is not None or args.input is not None and args.control1 is not None: # Don't crossover the SR and PE options, mix match addition
        print("\nTransloCapture ERROR: --read1 (-1)/--control1 (-c1) must be used alongside each other, not alongside --control (-c)/--input (-i).\n")
        sys.exit()
    if args.translocated1 is not None and args.read1 is None or args.translocated is not None and args.input is None: # Don't crossover the SR and PE options, fastq output edition
        print("\nTransloCapture ERROR: To write translocated reads, for single-read sequencing use --input (-i) and --translocated (-t), for PE sequencinig use --read1 --read2 (-1 -2) and --translocated1 --translocated2 (-t1 -t2). Don't mix and match.\n")
        sys.exit()
    if args.translocated1 is not None and args.translocated2 is None or args.translocated1 is None and args.translocated2 is not None: # Need both reads specified for PE, fastq output edition
        print("\nTransloCapture ERROR: If --translocated1 (-t1) or --translocated2 (-t2) are specified you must also supply the other. For single read sequencing use --input (-i) instead.\n")
        sys.exit()
    if args.fastqout1 is not None and args.read1 is None or args.fastqout is not None and args.input is None: # Don't crossover the SR and PE options, fastq output edition
        print("\nTransloCapture ERROR: To write reads, for single-read sequencing use --input (-i) and --fastqout (-fq), for PE sequencinig use --read1 --read2 (-1 -2) and --fastqout1 --fastqout2 (-fq1 -fq2). Don't mix and match.\n")
        sys.exit()
    if args.fastqout1 is not None and args.fastqout2 is None or args.fastqout1 is None and args.fastqout2 is not None: # Need both reads specified for PE, fastq output edition
        print("\nTransloCapture ERROR: If --fastqout1 (-fq1) or --fastqout2 (-fq2) are specified you must also supply the other. For single read sequencing use --input (-i) instead.\n")
        sys.exit()
    if args.preproc == False and args.input is not None and args.input.endswith(".csv"): # .csv input suggests they want --preproc
        print("\nDetected translocation matrix.csv input instead of fastq, activating --preproc (-pp).\n")
        args.preproc = True
    if args.preproc == True and args.control is None: # Preproc needs a control
        print("\nTransloCapture ERROR: --preproc (-pp) also needs --control (-c) to calculate a differential to the --input (-i) sample.\n")
        sys.exit()
    if args.preproc == True and args.read1 is not None: # Need to use SR options for preproc
        print("\nTransloCapture ERROR: --read1/2 (-1/2) and --control1/2 (-c1/2) are for paired fastq files.\nPlease use --input (-i) and --control (-c) with --preproc (-pp).\n")
        sys.exit()        
    if args.preproc == True and args.translocated is not None or args.preproc == True and args.translocated1 is not None: # Need to use SR options for preproc
        print("\nTransloCapture ERROR: --preproc (-pp) cannot be used alongside --translocated (-t) or --translocated1/2 (-t1/2) because no fastq is being analysed with preproc.\n")
        sys.exit()
    if args.preproc == True and args.fastqout is not None or args.preproc == True and args.fastqout2 is not None: # Need to use SR options for preproc
        print("\nTransloCapture ERROR: --preproc (-pp) cannot be used alongside --fastqout (-fq) or --fastqout1/2 (-fq1/2) because no fastq is being analysed with preproc.\n")
        sys.exit()  
    if args.primers is None and not args.preproc: # Need primer sequences unless using preproc
        print("\nTransloCapture ERROR: --primers (-p) is needed to identify translocated sequences in fastq files.\n")
        sys.exit()
    try:
        args.sensitivity = int(args.sensitivity)
    except ValueError:
        print("\nTransloCapture ERROR: --sensitivity (-s) must be a number from 0-10.\n")
        sys.exit()
    if args.sensitivity < 0: # Sensitivity below zero error
        print("\nTransloCapture ERROR: --sensitivity (-s) must be a number from 0-10.\n")
        sys.exit()
    elif 10 >= args.sensitivity > 5: # Low specificity warning for sensitivity > 5
        print("\nTransloCapture WARNING: --sensitivity (-s) greater than 5 greatly reduces specificity of the analysis.\n")
    elif args.sensitivity > 10: # Sensitivity limit warning
        print("\nTransloCapture WARNING: --sensitivity (-s) must not exceed 10, TransloCapture will run with a sensitivity of 10.\n")
        args.sensitivity = 10
    return(args)
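Example #55 uses add_argument_group purely to organise the --help listing into 'Required arguments' and 'Additional arguments', and converts --sensitivity to int by hand after parsing. A minimal sketch of the grouping, with type=int doing the conversion (and the error message) at parse time:

from argparse import ArgumentParser

parser = ArgumentParser(description='TransloCapture demo')
req_args = parser.add_argument_group('Required arguments')
add_args = parser.add_argument_group('Additional arguments')
req_args.add_argument('--input', '-i', help='Input fastq file')
req_args.add_argument('--output', '-o', help='Output csv file')
# type=int rejects non-numeric values with a standard argparse error.
add_args.add_argument('--sensitivity', '-s', type=int, default=2)

args = parser.parse_args(['-i', 'sample.fastq', '-o', 'out.csv', '-s', '4'])
print(args.sensitivity + 1)   # 5 -- already an int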
Example #56
0
def main():
    ''' will make an hdf5 database using the ifeat/ofeats at featDir '''

    parser = ArgumentParser()
    parser.add_argument("configPath", help="Path tp config_db file")
    args = parser.parse_args()

    if args.configPath is None:
        print("\n\nRequired arguments not set !")
        parser.print_help()
        print("Exiting ... \n\n")
        sys.exit(1)

    cfg = loadConfig(args.configPath)

    if cfg['shuffle']:
        print("Shuffling On")
    else:
        print("Shuffling Off")

    if cfg['normalize']:
        print("Will normalize values to between 0 and 1.0")
    else:
        print("Pixel values will be between 0-225")

    if cfg['compress']:
        print("Will apply GZip Compression\n")
    else:
        print("No compression applied\n")

    # creating directories
    saveDir = os.path.join(cfg['save_dir'], cfg['save_name'])
    savePath = os.path.join(saveDir, cfg['save_name'])
    ensureDir(savePath)

    saveConfig(args.configPath, saveDir)

    print("\nWill use images from : ")
    print(cfg['image_sets'])

    imgPaths = []
    for imgSet in cfg['image_sets']:
        imgPaths.extend(sorted(glob.glob(os.path.join(imgSet, '*'))))
    imgPaths = np.array(imgPaths)

    if cfg['kfold'] == 0:
        createHDF5(imgPaths,
                   cfg,
                   savePath + '.hdf5',
                   shuffle=cfg['shuffle'],
                   flushSize=cfg['flush_size'],
                   compress=cfg['compress'],
                   normalize=cfg['normalize'])
    else:
        print("Creating database for %d folds ...\n" % int(cfg['kfold']))
        createKFoldHDF5(cfg['kfold'],
                        imgPaths,
                        cfg,
                        saveDir,
                        cfg['save_name'],
                        shuffle=cfg['shuffle'],
                        flushSize=int(cfg['flush_size']),
                        compress=cfg['compress'],
                        normalize=cfg['normalize'])
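In Example #56, configPath is a required positional, so parse_args() exits on its own when it is missing and the args.configPath is None branch never actually runs. If an optional positional is what is wanted, nargs='?' provides the None default that check expects; a minimal sketch:

import sys
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('configPath', nargs='?', default=None,
                    help='Path to config_db file')

args = parser.parse_args([])            # no path supplied
if args.configPath is None:
    print('Required arguments not set!')
    parser.print_help()
    sys.exit(1)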
Example #57
0
def main(argv):

    for i, arg in enumerate(argv):
        if (arg[0] == '-') and arg[1].isdigit(): argv[i] = ' ' + arg

    parser = ArgumentParser(
        description='AstronZ: tools to analyse astronomical data'
        '|n version {:s} |n install path {:s} |n '
        'Filippo Maccagni <*****@*****.**>'.format(
            __version__, os.path.dirname(__file__)),
        formatter_class=MultilineFormatter,
        add_help=False)

    add = parser.add_argument

    add("-h",
        "--help",
        action="store_true",
        help="Print help message and exit")

    add("-v",
        "--version",
        action='version',
        version='{:s} version {:s}'.format(parser.prog, __version__))

    add('-c',
        '--cosmo',
        action='store_true',
        help='tools for cosmological calculations')

    add('-a', '--agn', action='store_true', help='tools for AGN science')

    add('-hi',
        '--radioHI',
        action='store_true',
        help='''tools for neutral hydrogen science''')

    args = parser.parse_args(argv)

    if args.help:
        print('\n\t************* --- AstronZ : Help --- **************\n')

        print('\t\t  ... called for help ...\n')
        parser.print_help()

        print("""\nRun a command. This can be:\n
astronz\t\t(all tools)
astronz -c\t(cosmological tools)
astronz -hi\t(neutral hydrogen tools)
astronz -agn \t(AGN science tools)
            """)
        print('\n\t************* --- AstronZ : DONE --- **************\n')

        sys.exit(0)

    elif args.cosmo:

        print('\n\t************* --- AstronZ : Cosmo --- **************\n')
        print('\t\t    ... Cosmological Tools ... \n')
        c = cosmo.Cosmo()
        c.main()

    elif args.radioHI:
        print('\n\t************* --- AstronZ : HI --- **************\n')
        print('\t\t... Neutral Hydrogen Tools ... \n')
        hi = radiohi.radioHI()

    elif args.agn:
        print('\n\t************* --- AstronZ : AGN --- **************\n')
        print('\t\t   ... AGN Science tools ... \n')
        a = agn.AGN()
        a.main()
    else:
        print('\n\t************* --- AstronZ --- **************\n')
        in1 = "\t   ... list of the avaliable classes: ...\n"
        in2 = '''\n\t - c (cosmological tools)
\t - hi (neutral hydrogen tools)
\t - a (AGN science tools)\n
'''
        inp = str(raw_input(in1 + in2))

        if inp == 'c':
            print('\n\t************* --- AstronZ : Cosmo --- **************\n')
            print('\t\t    ... Cosmological Tools ... \n')
            c = cosmo.Cosmo()
            c.main()
        elif inp == 'a':
            print('\n\t************* --- AstronZ : AGN --- **************\n')
            print('\t\t   ... AGN Science tools ... \n')
            a = agn.AGN()
            a.main()
        elif inp == 'hi':
            print('\n\t************* --- AstronZ : HI --- **************\n')
            print('\t\t... Neutral Hydrogen Tools ... \n')
            hi = radiohi.radioHI()
            hi.main()
        else:
            print(
                '\n\t ... you have not entered an available class function ... \n'
            )
            print('\t************* --- AstronZ : ERROR --- **************\n')
            sys.exit(0)
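Example #57 relies on a custom MultilineFormatter that is not included in the snippet; judging by the '|n' markers in the description string, it treats '|n' as an explicit paragraph break. A minimal sketch of such a formatter (an assumption, not the original implementation) could look like this:

import argparse
import textwrap


class MultilineFormatter(argparse.HelpFormatter):
    """Assumed behaviour: treat '|n' in the description text as a paragraph break."""

    def _fill_text(self, text, width, indent):
        # Collapse whitespace, then wrap each '|n'-delimited paragraph separately.
        text = ' '.join(text.split())
        paragraphs = text.split('|n')
        return '\n\n'.join(
            textwrap.fill(p.strip(), width,
                          initial_indent=indent,
                          subsequent_indent=indent)
            for p in paragraphs)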
Example #58
0
def main():
    prog = "python plot_scatter.py"
    description = "Plots performances of the best config at one time vs another in a scatter plot"

    parser = ArgumentParser(description=description, prog=prog)

    # General Options
    # parser.add_argument("-l", "--log", action="store_true", dest="log",
    #                     default=False, help="Plot on log scale")
    parser.add_argument("--max", dest="max", type=float,
                        default=1000, help="Maximum of both axes")
    parser.add_argument("--min", dest="min", type=float,
                        default=None, help="Minimum of both axes")
    parser.add_argument("-s", "--save", dest="save",
                        default="", help="Where to save plot instead of showing it?")
    parser.add_argument("--title", dest="title",
                        default="", help="Optional supertitle for plot")
    parser.add_argument("--greyFactor", dest="grey_factor", type=float,
                        default=1, help="If an algorithm is not greyFactor-times better"
                                        " than the other, show this point less saliently, > 1")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False,
                        help="Plot some debug info")
    parser.add_argument("-f", "--lineFactors", dest="linefactors",
                        default=None, help="Plot X speedup/slowdown, format 'X,..,X' (no spaces)")
    parser.add_argument("--time", dest="time", default=None,
                        help="Plot config at which time?, format 'time1,time2' ")
    parser.add_argument("--obj", dest="obj", default=None, required=True,
                        help="Path to validationObjectiveMatrix-traj-run-* file")
    parser.add_argument("--res", dest="res", required=True,
                        help="Path to validationResults-traj-run-* file")
    parser.add_argument("--minvalue", dest="minvalue", type=float,
                        help="Replace all values smaller than this",)
    parser.add_argument("--fontsize", dest="fontsize", type=int, default=20,
                        help="Use this fontsize for plotting",)

    args, unknown = parser.parse_known_args()

    if len(unknown) != 0:
        print "Wrong number of arguments"
        parser.print_help()
        sys.exit(1)

    if args.grey_factor < 1:
        print "A grey-factor lower than one makes no sense"
        parser.print_help()
        sys.exit(1)

    # Load validationResults
    res_header, res_data = plot_util.read_csv(args.res, has_header=True)
    av_times = [float(row[0]) for row in res_data]
    if args.time is None:
        # Print available times and quit
        print("Choose a time from")
        print("\n".join(["* %s" % i for i in av_times]))
        sys.exit(0)
    time_arr = args.time.split(",")
    if len(time_arr) != 2 or time_arr[0] == "" or time_arr[1] == "":
        print("Something wrong with %s, should be 'a,b'" % args.time)
        print("Choose a time from")
        print("\n".join(["* %s" % i for i in av_times]))
        sys.exit(0)
    time_1 = float(time_arr[0])
    time_2 = float(time_arr[1])

    # Now extract data
    config_1 = [int(float(row[len(res_header)-2].strip('"'))) for row in res_data if int(float(row[0])) == int(time_1)]
    config_2 = [int(float(row[len(res_header)-2].strip('"'))) for row in res_data if int(float(row[0])) == int(time_2)]
    if len(config_1) == 0 or len(config_2) == 0:
        print "Time int(%s) or int(%s) not found. Choose a time from:" % (time_1, time_2)
        print "\n".join(["* %s" % i for i in av_times])
        sys.exit(1)
    config_1 = config_1[0]
    config_2 = config_2[0]

    obj_header, obj_data = plot_util.read_csv(args.obj, has_header=True)
    head_template = '"Objective of validation config #%s"'
    idx_1 = obj_header.index(head_template % config_1)
    idx_2 = obj_header.index(head_template % config_2)

    data_one = np.array([float(row[idx_1].strip('"')) for row in obj_data])
    data_two = np.array([float(row[idx_2].strip('"')) for row in obj_data])

    print "Found %s points for config %d and %s points for config %d" % (str(data_one.shape), config_1, str(data_two.shape), config_2)

    linefactors = list()
    if args.linefactors is not None:
        linefactors = [float(i) for i in args.linefactors.split(",")]
        if len(linefactors) < 1:
            print "Something is wrong with linefactors: %s" % args.linefactors
            sys.exit(1)
        if min(linefactors) < 1:
            print "A line-factor lower than one makes no sense"
            sys.exit(1)
    if args.grey_factor > 1 and args.grey_factor not in linefactors:
        linefactors.append(args.grey_factor)

    label_template = 'Objective of validation config #%s, best at %s sec'
    
    # This might produce overhead for large .csv files
    times = [int(float(row[0])) for row in res_data]
    time_1 = res_data[times.index(int(time_1))][0]
    time_2 = res_data[times.index(int(time_2))][0]

    # Clip values below --minvalue, if one was given
    if args.minvalue is not None:
        data_one = np.array([max(args.minvalue, i) for i in data_one])
        data_two = np.array([max(args.minvalue, i) for i in data_two])

    save = ""
    if args.save != "":
        save = args.save
        print "Save to %s" % args.save
    else:
        print "Show"
    plot_scatter_plot(x_data=data_one, y_data=data_two,
                      labels=[label_template % (config_1, str(time_1)),
                              label_template % (config_2, str(time_2))],
                      title=args.title, save=save,
                      max_val=args.max, min_val=args.min,
                      grey_factor=args.grey_factor, linefactors=linefactors,
                      user_fontsize=args.fontsize,
                      debug=args.verbose)
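Example #58 also depends on a helper module plot_util whose read_csv is not shown. From the way it is used above (header row returned separately, every cell kept as a string for the caller to strip and cast), a minimal stand-in (an assumption, not the original helper) might be:

import csv


def read_csv(path, has_header=True, delimiter=","):
    """Return (header, rows); cells stay strings, as the caller strips and casts them itself."""
    with open(path, newline="") as csv_file:
        rows = list(csv.reader(csv_file, delimiter=delimiter))
    header = rows.pop(0) if has_header else None
    return header, rows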
Example #59
0
def main():
    '''
    Description:
        Gathers input parameters and extracts auxiliary NARR data.  Calling
        main is intended for debugging purposes only.
    '''

    # Create a command line argument parser
    description = ('Retrieves and generates auxiliary LST inputs, then'
                   ' processes and calls other executables for LST generation')
    parser = ArgumentParser(description=description)

    # ---- Add parameters ----
    # Required parameters (--xml is validated manually below so that --version can run on its own)
    parser.add_argument('--xml',
                        action='store',
                        dest='xml_filename',
                        required=False,
                        default=None,
                        help='The XML metadata file to use')

    parser.add_argument('--debug',
                        action='store_true',
                        dest='debug',
                        required=False,
                        default=False,
                        help='Keep any debugging data')

    parser.add_argument('--version',
                        action='store_true',
                        dest='version',
                        required=False,
                        default=False,
                        help='Reports the version of the software')

    # Parse the command line parameters
    args = parser.parse_args()

    # Command line arguments are required so print the help if none were
    # provided
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)  # EXIT FAILURE

    # Report the version and exit
    if args.version:
        print(util.Version.version_text())
        sys.exit(0)  # EXIT SUCCESS

    # Verify that the --xml parameter was specified
    if args.xml_filename is None:
        raise Exception('--xml must be specified on the command line')

    # Verify that the XML filename provided is not an empty string
    if args.xml_filename == '':
        raise Exception('No XML metadata filename provided.')

    # Setup the logging level
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    # Setup the default logger format and level.  Log to STDOUT.
    logging.basicConfig(format=('%(asctime)s.%(msecs)03d %(process)d'
                                ' %(levelname)-8s'
                                ' %(filename)s:%(lineno)d:'
                                '%(funcName)s -- %(message)s'),
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=log_level,
                        stream=sys.stdout)

    # Get the logger
    logger = logging.getLogger(__name__)

    try:
        logger.info('Extracting LST AUX data')
        current_processor = AuxNARRGribProcessor(args.xml_filename)
        current_processor.extract_aux_data()

    except Exception:
        logger.exception('Failed processing auxiliary NARR data')
        raise

    logger.info('Completed extraction of auxiliary NARR data')
import glob
import json
import os
import subprocess
import sys
import traceback
from argparse import ArgumentParser
from multiprocessing import Pool
from subprocess import Popen, PIPE, DEVNULL
from pathlib import Path

#import ipfshttpclient

parser = ArgumentParser(description="keep live")
subparsers = parser.add_subparsers(help="Command")
parser.set_defaults(command=lambda _: parser.print_help())
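# The command= default above follows the usual subparser dispatch pattern (an
# assumption about the rest of this script): each subcommand registers its own
# handler via set_defaults(command=...), and the script eventually calls
# args.command(args); with no subcommand given, the lambda simply prints the help.
# A hypothetical subcommand would be wired up like this:
#
#   status_parser = subparsers.add_parser("status", help="hypothetical subcommand")
#   status_parser.add_argument("gateway")
#   status_parser.set_defaults(command=lambda args: print("checking", args.gateway))
#
#   args = parser.parse_args()
#   args.command(args)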


def download_with_curl(gateway, hash):

    # Fetch through the gateway's HTTP API endpoint
    url = f"https://{gateway}/api/v0/get?arg={hash}&archive=true"  # &archive=true is more likely to bypass cache
    print('api ' + url)

    Path(f"./test/{gateway}").mkdir(parents=True, exist_ok=True)

    with open(f"./test/{gateway}/{hash}.log", "wb") as f:
        p = Popen(["curl", '-f', '-X', 'POST', url], stdout=DEVNULL, stderr=f)
        # wait for curl to finish; this also sets the returncode attribute on 'p'
        p.wait()
        #print(p.returncode)