def parse_arguments():
    """Parse command-line options for the model mirroring tool.

    Requires an existing --input model file and a non-existing --output
    path; exits via ``parser.error`` otherwise. Returns the validated
    options object with ``input_path`` normalized.
    """
    parser = OptionParser()
    parser.description = \
        "This program takes a left (or right) occluded model and " \
        "creates its mirror right (or left) occluded one"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="PATH", type="string",
                      help="path to a trained model.")
    parser.add_option("-o", "--output", dest="output_path",
                      metavar="PATH", type="string",
                      help="path to the model file to be created")
    (options, args) = parser.parse_args()
    #print (options, args)

    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        # bug fix: 'path' was an undefined name here (NameError on the
        # error path); report the actual option value instead
        parser.error("Could not find the input file %s" % options.input_path)

    # we normalize the path
    options.input_path = os.path.normpath(options.input_path)

    if options.output_path:
        if os.path.exists(options.output_path):
            parser.error("output_path should point to a non existing file")
    else:
        parser.error("'output' option is required to run this program")

    return options
def __init__ (self):
    """Parse command-line options and validate the requested encoding.

    Populates ``self.ops`` and ``self.args`` from sys.argv; defaults the
    job to printing the sum when neither -l nor -s is given; exits with
    status 1 when the encoding is unknown to the codecs registry.
    """
    #option settings
    op=OptionParser(usage='usage: %prog [OPTION]... [FILE]...', version="%prog 0.0.1")
    op.description="Print counts of Chinese characters in given FILE(s). If no"+\
        " FILE is given,it will read from standard input stream"
    op.add_option('-l','--list',dest='list',
                  help = 'List all appeared Chinese characters with their counts',
                  action = 'store_true')
    op.add_option('-s','--sum',dest='sum',
                  help = 'Print the sum of all used Chinese characters',
                  action = 'store_true')
    op.add_option('-e','--encoding',dest='encoding',default='utf8',
                  help = 'Specify input encoding (default UTF-8)',
                  action = 'store',type='string')
    self.ops,self.args = op.parse_args()
    #the default job is to print the sum
    if self.ops.list is None and self.ops.sum is None:
        self.ops.sum = True
    #check up encoding
    try:
        codecs.lookup(self.ops.encoding)
    except LookupError as e:
        # unknown encoding name: report it and abort
        print( e)
        exit(1)
def main():
    """Command-line entry point for the blind file-extraction query tool.

    Builds an injection query from module-level templates and runs a
    bisection search over the target. Returns 0 after debug self-tests,
    1 when fewer than three positional arguments are given.
    """
    # module-level default user agent, optionally overridden via -U
    global default_ua

    # debug builds only run the self-tests and stop
    if IS_DEBUG > 0:
        tests()
        return 0

    parser = OptionParser()
    parser.description = description
    # parser.add_option('-u', '--url', dest="url", help="target url")
    parser.add_option("-U", "--user-agent", dest="useragent",
                      help="default user agent")
    # parser.add_option('-f', '--file', dest="file", help="file for reading")
    (options, args) = parser.parse_args()

    if options.useragent is not None:
        default_ua = options.useragent

    # NOTE(review): judging from the log call below, the positional
    # arguments appear to be [file, offset, length] — confirm with usage
    if len(args) < 3:
        return 1

    # build the query by successive placeholder substitution into the
    # module-level template 'data'
    q = my_replace(data, payload=payload_q)
    q = my_replace(q, CONDITION=mid_expr)
    q = my_replace(q, target=file_load_q)
    q = my_replace(q, dst_file=string_tonum(args[0]))
    log.debug(q)
    r = bisection(q, int(args[2]), offset=int(args[1]))
    log.info("file %s len %s offset %s" % (args[0], args[2], args[1]))
    log.info("string = %s" % r)
def main(argv):
    """
    Converts LCOV coverage data to Cobertura-compatible XML for reporting.

    Usage:
        lcov_cobertura.py lcov-file.dat
        lcov_cobertura.py lcov-file.dat -b src/dir -e test.lib -o path/out.xml

    By default, XML output will be written to ./coverage.xml

    NOTE(review): despite the usage text above, the body below drives a
    VSCobertura converter with hard-coded Windows workspace paths —
    confirm which behaviour is intended.
    """
    parser = OptionParser()
    parser.usage = 'lcov_cobertura.py lcov-file.dat [-b source/dir] [-e <exclude packages regex>] [-o output.xml]'
    parser.description = 'Converts lcov output to cobertura-compatible XML'
    parser.add_option('-b', '--base-dir', action='store',
                      help='Directory where source files are located',
                      dest='base_dir', default='.')
    parser.add_option('-o', '--output',
                      help='Path to store cobertura xml file', action='store',
                      dest='output', default='coverage.xml')
    (options, args) = parser.parse_args(args=argv)

    # exactly one positional argument (after argv[0]) is expected
    if len(args) != 2:
        print((main.__doc__))
        sys.exit(1)

    try:
        vs_cobertura = VSCobertura("c:\\myworkspace\\wx2\\latest\\wme\\")
        vs_cobertura.addCoverage(args[1], ["wme\\mediasession", "wme\\mediaengine"], ["include", "unittest"])
        cobertura_xml = vs_cobertura.convert()
        # write the generated XML report to the requested output path
        with open(options.output, mode='wt') as output_file:
            output_file.write(cobertura_xml)
    except IOError:
        sys.stderr.write("Unable to convert %s to Cobertura XML" % args[1])
def parse_options():
    """Build the option parser, parse argv and validate the result.

    At least one of --scan / --file must be supplied, otherwise the
    parser reports an error. Returns the (opts, args) pair.
    """
    import version

    help_formatter = IndentedHelpFormatter(
        indent_increment=2, max_help_position=32, width=100, short_first=0)
    parser = OptionParser(
        conflict_handler="resolve",
        formatter=help_formatter,
        usage="freevo %prog [options]",
        version="%prog " + str(version.version),
    )
    parser.prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    parser.description = "Helper to convert old local_conf.py configuration to current configuration"
    # parser.add_option('-v', '--verbose', action='count', default=0,
    #                   help='set the level of verbosity [default:%default]')
    parser.add_option("--scan", action="store_true", default=False,
                      help="scan source files for the old variables")
    parser.add_option("--file", metavar="FILE", default=None,
                      help="the local_conf.py file [default:%default]")
    parser.add_option("-w", "--write", action="store_true", default=False,
                      help="write the local_conf.py file, this will overwrite an existing file!")

    opts, args = parser.parse_args()
    if not (opts.file or opts.scan):
        parser.error("either --scan or --file must be given.")
    return opts, args
def process_options(): "Process command-line options and arguments." parser = OptionParser() parser.usage = "%prog <input files> [options]" parser.description = "Plots the ice flux as a function of the distance from the divide." parser.add_option("-o", "--output", dest="output", type="string", help="Output image file name (e.g. -o foo.png)") parser.add_option("-e", "--experiment", dest="experiment", type="string", help="MISMIP experiment: 1a,1b,2a,2b,3a,3b (e.g. -e 1a)") parser.add_option("-s", "--step", dest="step", type="int", help="MISMIP step: 1,2,3,... (e.g. -s 1)") parser.add_option("-m", "--model", dest="model", type="string", help="MISMIP model (e.g. -M ABC1)") parser.add_option("-f", "--flux", dest="profile", action="store_false", default=True, help="Plot ice flux only") parser.add_option("-p", "--profile", dest="flux", action="store_false", default=True, help="Plot geometry profile only") opts, args = parser.parse_args() if len(args) == 0: print("ERROR: An input file is requied.") exit(0) if len(args) > 1 and opts.output: print("More than one input file given. Ignoring the -o option...\n") opts.output = None if opts.output and opts.profile and opts.flux: print("Please choose between flux (-f) and profile (-p) plots.") exit(0) return args, opts.output, opts
def parse_arguments():
    """Parse and validate the -i/-o options for the dataset creator.

    The input folder must exist; the output directory must not exist
    yet. Returns the options with ``input_path`` normalized.
    """
    parser = OptionParser()
    parser.description = \
        "This program takes the INRIA pedestrians dataset and " \
        "creates a new training dataset for multiscale objects detection"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="PATH", type="string",
                      help="path to the INRIAPerson dataset Test or Train folder")
    parser.add_option("-o", "--output", dest="output_path",
                      metavar="DIRECTORY", type="string",
                      help="path to a non existing directory where the new training dataset will be created")
    options, args = parser.parse_args()

    # the input folder is mandatory and must exist
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        parser.error("Could not find the input file")
    # we normalize the path
    options.input_path = os.path.normpath(options.input_path)

    # the output directory is mandatory and must not exist yet
    if not options.output_path:
        parser.error("'output' option is required to run this program")
    if os.path.exists(options.output_path):
        parser.error("output_path should point to a non existing directory")

    return options
def parse_arguments():
    """Parse command line options; -i is mandatory, -n defaults to 10."""
    parser = OptionParser()
    parser.description = \
        "This program takes the INRIA pedestrians dataset and " \
        "creates occluded pedestrians"

    parser.add_option("-i", "--input", dest="input_file",
                      metavar="PATH", type="string",
                      help="path to the .data_sequence file")
    parser.add_option("-o", "--output", dest="output_path",
                      metavar="DIRECTORY", type="string",
                      help="path to a non existing directory where the new training dataset will be created")
    parser.add_option("-n", "--number_of_samples", dest="number_of_samples",
                      type="int", default=10,
                      help="Number of samples to be randomly choosen")
    options, args = parser.parse_args()

    # only the input file is validated; output checks are disabled below
    if not options.input_file:
        parser.error("'input_file' option is required to run this program")

    #if not options.output_path:
    #    parser.error("'output' option is required to run this program")
    # if os.path.exists(options.output_path):
    #     parser.error("output_path already exists")

    return options
def cli():
    """Command-line entry point: cascade ini files and print the result.

    Prints JSON when -j/--json is given, otherwise the merged config in
    ini format.
    """
    # parse our args
    from optparse import OptionParser
    option_parser = OptionParser()
    option_parser.usage = "%prog [options] path path2 path3 ..."
    option_parser.description = "Will cascade together multiple"\
                                + " (python config) ini files. Paths can be directories or files."
    option_parser.add_option('-j', '--json', dest='output_json',
                             default=False,
                             help="output in json format",
                             action="store_true")
    options, args = option_parser.parse_args()

    # now that we know what we're doing lets do it
    smasher = ConfigSmasher(args)
    r_dict = smasher.smash()

    # now the smasher object has the config on it, we want
    # to write it to stdout
    # do they want json?
    if options.output_json:
        import json
        # bug fix: python 2 print statement -> print() call
        print(json.dumps(r_dict))
    else:
        # bug fix: cStringIO does not exist on python 3; io.StringIO
        # provides the same in-memory text buffer
        from io import StringIO
        buffer = StringIO()
        smasher.config.write(buffer)
        print(buffer.getvalue())
def main():
    """Read a csv-like table from FILE (or stdin) and emit javascript.

    When -o is left at its default and the table comes from a file, the
    generated object is named after the file's base name.
    """
    global ops
    op=OptionParser(usage='usage: %prog [OPTION]... [FILE]',
                    version="%prog 0.0.1")
    op.description="Convert csv-like table(from file or stdin) to self-parsing javascript code:"+\
        "Object made of arrays.\n "+\
        "The table file must have a header as its first line."+\
        "And the header will be used to name the generated arrays"
    op.add_option('-f',dest='FS', default="|",
                  help='sets the field separator of incoming file,which is by default "|" ',
                  action = 'store',type="string")
    op.add_option('-o',dest='objname', default="obj",
                  help='names the target object. If it is omitted, if the table text will be read from '+\
                       "a file, the file's base name will became the object name, otherwise 'obj' will be used",
                  action = 'store',type="string")
    op.add_option('-s',dest='OFS',
                  help='sets the field separater of output string (intermediate), '+\
                       'which is by default "~"',default="~",
                  action = 'store',type="string")
    (ops,args)=op.parse_args()

    if len(args)==0:
        # no file argument: read the table from standard input
        process(sys.stdin)
    else:
        if not os.path.isfile(args[0]):
            exit(1)
        else:
            if ops.objname=='obj':
                # bug fix: this line used '==' (a no-op comparison) where
                # an assignment was intended, so the default object name
                # was never replaced by the file's base name
                ops.objname=os.path.splitext(os.path.basename(args[0]))[0]
            with open(args[0]) as coming:
                process(coming)
def main():
    """Track Amazon prices listed in a request file, storing data in redis.

    --clean flushes the redis database and exits; otherwise the request
    file is read and price tracking starts.
    """
    cwd = getcwd()
    redis_server = redis.StrictRedis(host='localhost', port=6379, db=0)

    parser = OptionParser()
    parser.description = '''This is a script that tracks prices from Amazon. Read README.md for more information.'''
    parser.add_option("--file", action="store", type="string",
                      default = cwd + "/request",
                      metavar = "/absolue/path/to/request", dest = "file",
                      help="provide absolue path to the request file, if not in the current"
                           " directory or running tracker with a cronjob")
    parser.add_option("--clean", action="store_true", dest = "clean",
                      help = "clean all data in redis server")
    (options, args) = parser.parse_args()

    if options.clean:
        # sys.exit raises SystemExit, which is not caught by the
        # 'except Exception' below, so a successful flush terminates here
        try:
            redis_server.flushall()
            sys.exit("Successfully cleaned redis data")
        except Exception as e:
            # bug fix: python 2 print statements -> print() calls,
            # matching the python 3 syntax used elsewhere in this file
            print("Something went wrong while cleaning data")
            print("Here is the actual error: ")
            print(e)

    if options.file:
        request_file = options.file
        amazon = connect_to_amazon(request_file)
        products = get_products(request_file)
        track_prices(redis_server, amazon, products, request_file)
def parse_options():
    """Parse command line options into the module-level globals.

    Fills ``commands``, ``hosts`` and ``maxthreads``, configures logging
    verbosity, and exits when no commands or hosts are available.
    """
    global commands, hosts, maxthreads, verboselvl

    parser = OptionParser()
    parser.description = "Simple Slave Server Command executor"
    parser.add_option('-l', '--host_list', dest="host_list",
                      help="delimited list of hosts alias.")
    parser.add_option('-o', '--host', dest="host", help="only one host")
    parser.add_option('-c', '--command', dest="command", help="command to execute")
    parser.add_option('-n', '--nthreads', dest="nthreads", help="number of parralell threads")
    parser.add_option('-v', '--verbose', dest="verbose", action="store_true", help="verbose level")
    parser.print_help = usage
    (options, args) = parser.parse_args()

    # a commands file may be passed as the first positional argument
    if len(args) > 0:
        try:
            commands = open(args[0]).read().rstrip('\n')
        except Exception as e:
            # bug fix: python 2 print statement -> print() call
            print(e); exit(1)
    if options.command is not None:
        commands = options.command
    if options.host_list is not None:
        hosts = open(options.host_list).read().rstrip('\n')
    if options.host is not None:
        hosts = options.host
    if options.nthreads is not None:
        maxthreads = int(options.nthreads)
        # never run more threads than there are hosts
        # NOTE(review): hosts may still be None here when -n is given
        # without any host option — confirm intended behaviour
        maxthreads = min(maxthreads, len(hosts.splitlines()))
    if options.verbose:
        logging.basicConfig(level = logging.DEBUG)

    #sanity check
    if commands is None or len(commands) == 0:
        # bug fix: python 2 print statement -> print() call
        print('can\'t find any commands'); exit(1)
    if hosts is None:
        usage()
        exit()

    print(hosts.splitlines())
    print("%s threads" % maxthreads)
def parse_commandline():
    """Parse the commandline and return parsed (options, args).

    Expects exactly two positional arguments: a command ("add" or
    "delete") followed by a package name.
    """
    parser = OptionParser()
    parser.description = __doc__
    parser.set_usage('usage: %prog [options] (add|delete) [package].\n'
                     'Try %prog --help for details.')
    parser.add_option('-d', '--debug', action='store_true',
                      help='Enables debugging mode')
    parser.add_option('-l', '--login', help='Username to login with')
    parser.add_option('-a', '--account',
                      help='User owning the repositories to be changed '
                           '[default: same as --login]')
    parser.add_option('-t', '--apitoken',
                      help='API Token - can be found on the lower right of '
                           'https://github.com/account')
    options, args = parser.parse_args()

    if len(args) != 2:
        parser.error('wrong number of arguments')
    # bug fix: the original follow-up checks tested len(args) == 1, which
    # could never be true after the strict check above (dead code); only
    # the command-name validation for the two-argument case remains
    if args[0] not in ['add', 'delete']:
        parser.error('unknown command %r. Try "add" or "delete"\n' % args[0])
    return options, args
def run(args=None):
    """Build the dnet_recovery option parser and hand the result to main().

    :param args: optional argv list; ``None`` means ``sys.argv[1:]``.

    ``parser.parse_args`` returns an (options, args) pair which is
    unpacked into ``main(options, args)``. Depends on the module-level
    ``elliptics`` binding and ``ALLOWED_STAT_FORMATS`` constant.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.usage = "%prog [options] TYPE"
    parser.description = __doc__
    parser.add_option("-b", "--batch-size", action="store", dest="batch_size", default="1024",
                      help="Number of keys in read_bulk/write_bulk batch [default: %default]")
    parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
                      help="Enable debug output [default: %default]")
    parser.add_option("-D", "--dir", dest="tmp_dir", default='/var/tmp/dnet_recovery_%TYPE%', metavar="DIR",
                      help="Temporary directory for iterators' results [default: %default]")
    parser.add_option("-g", "--groups", action="store", dest="elliptics_groups", default=None,
                      help="Comma separated list of groups [default: all]")
    parser.add_option("-k", "--lock", dest="lock", default='dnet_recovery.lock', metavar="LOCKFILE",
                      help="Lock file used for recovery [default: %default]")
    parser.add_option("-l", "--log", dest="elliptics_log", default='dnet_recovery.log', metavar="FILE",
                      help="Output log messages from library to file [default: %default]")
    parser.add_option("-L", "--log-level", action="store", dest="elliptics_log_level",
                      default=elliptics.log_level.notice,
                      help="Elliptics client verbosity [default: %default]")
    parser.add_option("-n", "--nprocess", action="store", dest="nprocess", default="1",
                      help="Number of subprocesses [default: %default]")
    parser.add_option("-N", "--dry-run", action="store_true", dest="dry_run", default=False,
                      help="Enable test mode: only count diffs without recovering [default: %default]")
    parser.add_option("-r", "--remote", action="append", dest="remotes", default=[],
                      help="Elliptics node address")
    parser.add_option("-s", "--stat", action="store", dest="stat", default="text",
                      help="Statistics output format: {0} [default: %default]".format("/".join(ALLOWED_STAT_FORMATS)))
    parser.add_option("-S", "--safe", action="store_true", dest="safe", default=False,
                      help="Do not remove recovered keys after merge [default: %default]")
    parser.add_option("-t", "--time", action="store", dest="timestamp", default=None,
                      help="Recover keys modified since `time`. "
                           "Can be specified as timestamp or as time difference"
                           "e.g.: `1368940603`, `12h`, `1d`, or `4w` [default: %default]")
    parser.add_option("-e", "--no-exit", action="store_true", dest="no_exit", default=False,
                      help="Will be waiting for user input at the finish.")
    parser.add_option("-m", "--monitor-port", action="store", dest="monitor_port", default=0,
                      help="Enable remote monitoring on provided port [default: disabled]")
    parser.add_option("-w", "--wait-timeout", action="store", dest="wait_timeout", default="3600",
                      help="[Wait timeout for elliptics operations default: %default]")
    # NOTE(review): the long flag is spelled "--attemps" while dest is
    # "attempts" — presumably a typo kept for CLI compatibility; confirm
    parser.add_option("-a", "--attemps", action="store", dest="attempts", default=1,
                      help="Number of attempts to recover one key")
    parser.add_option("-o", "--one-node", action="store", dest="one_node", default=None,
                      help="Elliptics node address that should be iterated/recovered [default: %default]")
    parser.add_option("-c", "--chunk-size", action='store', dest='chunk_size', default=1024 * 1024,
                      help="Size of chunk by which all object will be read and recovered [default: %default]")
    parser.add_option("-C", "--custom-recover", action="store", dest="custom_recover", default="",
                      help="Sets custom recover app which accepts file path and returns file path to filtered keys")
    parser.add_option("-f", '--dump-file', action='store', dest='dump_file', default='',
                      help='Sets dump file which contains hex ids of object that should be recovered')
    parser.add_option('-i', '--backend-id', action='store', dest='backend_id', default=None,
                      help='Specifies backend data on which should be recovered. IT WORKS ONLY WITH --one-node')
    parser.add_option("-u", "--dont-dump-keys", action="store_false", dest="dump_keys", default=True,
                      help="Disable dumping all iterated key [default: %default]")
    parser.add_option("-M", "--no-meta", action="store_true", dest="no_meta", default=False,
                      help="Recover data without meta. It is usefull only for services without data-rewriting because"
                           " with this option dnet_recovery will not check which replica of the key is newer"
                           " and will copy any replica of the key to missing groups.")

    # unpack (options, args) into main(options, args)
    return main(*parser.parse_args(args))
def parse_commandline():
    """Parse sys.argv and return the validated (options, args) pair.

    Accepts either ``list`` alone, or ``add``/``remove`` followed by a
    collaborator name; --login is always required.
    """
    parser = OptionParser()
    parser.description = __doc__
    parser.set_usage("usage: %prog [options] (list|add|remove) [collaborator]."
                     "\nTry %prog --help for details.")
    parser.add_option("-d", "--debug", action="store_true",
                      help="Enables debugging mode")
    parser.add_option("-c", "--cache", default=None,
                      help="Location for network cache [default: None]")
    parser.add_option("-l", "--login", help="Username to login with")
    parser.add_option(
        "-a", "--account",
        help="User owning the repositories to be changed "
             "[default: same as --login]"
    )
    parser.add_option(
        "-t", "--apitoken",
        help="API Token - can be found on the lower right of "
             "https://github.com/account"
    )
    options, args = parser.parse_args()

    argc = len(args)
    if argc not in (1, 2):
        parser.error("wrong number of arguments")
    if argc == 1:
        command = args[0]
        if command in ["add", "remove"]:
            parser.error("%r needs a collaborator name as second parameter\n" % command)
        elif command != "list":
            parser.error('unknown command %r. Try "list", "add" or "remove"\n' % command)
    elif args[0] not in ["add", "remove"]:
        parser.error('unknown command %r. Try "list", "add" or "remove"\n' % args[0])
    if not options.login:
        parser.error("you must provide --login information\n")
    return options, args
def parse_arguments():
    """Validate the -i/-o options; both are required and -i must exist."""
    parser = OptionParser()
    parser.description = \
        "This program takes a detections.data_sequence created by ./objects_detection and converts it into the Caltech dataset evaluation format"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the folder containing the recordings")
    parser.add_option("-o", "--output", dest="output_path",
                      type="string",
                      help="path to a directory where the curves are saved")
    options, args = parser.parse_args()

    # the recordings folder is mandatory and must exist
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        parser.error("Could not find the input file")

    # only presence of the output path is checked
    if not options.output_path:
        parser.error("'output_path' option is required to run this program")

    return options
def parse_arguments():
    """Parse -i/-c options; both are required and the model must exist."""
    parser = OptionParser()
    parser.description = \
        "The program reads an existing model file and generates models for different amounts of occlusions"

    parser.add_option("-i", "--input_model", dest="input_model",
                      type="string",
                      help="path to the trained model.")
    parser.add_option("-c", "--classifier_type", dest="classifier_type",
                      type="string",
                      help="this option is required and denotes the type of the classifier: \"up\" or \"left\"")
    options, args = parser.parse_args()

    # validate classifier type first, then the model path
    if not options.classifier_type:
        parser.error("'classifier_type' has to be specified")
    if not options.input_model:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_model):
        parser.error("Could not find the input file %s" % options.input_model)

    return options
def main():
    """Entry point: optionally import a crowdin export, then validate."""
    parser = OptionParser("Usage: %prog [options]")
    parser.description = """ Script for validating and updating translations from crowdin. Default action is to validate the existing translations. USAGE python3 crowdin.py -f path/to/crowdin.zip -v Note: Python 3 required. """.strip()
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
    parser.add_option("-d", "--debug", action="store_true", dest="debug")
    parser.add_option("-f", "--file", action="store", type="string", dest="zip_path",
                      help="Path to crowdin export zip file. This will replace all local versions of these files with the crowdin versions.")
    parser.add_option("-l", "--locale", action="store", dest="locale",
                      help="Only import and/or validate this locale")
    parser.add_option("-k", "--key", action="store", dest="key",
                      help="Only validate keys containing this substring")
    parser.add_option("-u", "--find-unused", action="store_true", dest="find_unused",
                      help="Show keys that don't seem to be used any more")
    options, args = parser.parse_args()

    # a zip path triggers the import step before validation
    if options.zip_path:
        import_crowdin_for_android(options.zip_path, options)

    validate_android_translations(options)

    if options.find_unused:
        print("\n\n")
        find_unused_keys(options)
def parse_options():
    """Parse command line options for the schedule-to-favorites helper.

    Validates that the schedule XML file exists and that the favorites
    pickle does not exist yet. Returns (opts, args).
    """
    import version

    help_formatter = IndentedHelpFormatter(
        indent_increment=2, max_help_position=32, width=100, short_first=0)
    parser = OptionParser(conflict_handler='resolve',
                          formatter=help_formatter,
                          usage="freevo %prog [options]",
                          version='%prog ' + str(version.version))
    parser.prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    parser.description = "Helper to convert the record_schedule.xml to favorites.pickle"
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='set the level of verbosity [default:%default]')
    parser.add_option('-i', '--schedule', metavar='FILE', default=config.TV_RECORD_SCHEDULE,
                      help='the record schedule file [default:%default]')
    parser.add_option('-o', '--favorites', metavar='FILE', default=config.TV_RECORD_FAVORITES,
                      help='the record favorites file [default:%default]')

    opts, args = parser.parse_args()

    # the source must exist, be an XML file, and the target must not exist
    if not os.path.exists(opts.schedule):
        parser.error('%r does not exist.' % (opts.schedule,))
    if os.path.exists(opts.favorites):
        parser.error('%r exists, please remove.' % (opts.favorites,))
    if os.path.splitext(opts.schedule)[1] != '.xml':
        parser.error('%r is not an XML file.' % (opts.schedule,))
    return opts, args
def main():
    """Send a message through Pushover; the -m option is mandatory."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.description = "This module will send a message through the Pushover.net notification service. It requires at least the '-m' / '--message' parameter to be passed."
    parser.add_option("-c", "--config", dest = "configfile",
                      help = "Location of the Pushover config file.")
    parser.add_option("-d", "--debug", dest = "debug", action = "store_true",
                      help = "Log at the DEBUG loglevel.")
    parser.add_option("-m", "--message", dest = "message",
                      help = "The message to send, will truncate to 512 chars.")

    # no arguments at all: show the help text and bail out
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)

    (options, args) = parser.parse_args()

    loglevel = logging.DEBUG if options.debug else logging.INFO
    logging.basicConfig(level=loglevel,
                        format="%(asctime)s [%(module)s] %(levelname)s: %(message)s")

    client = PushoverClient(options.configfile) if options.configfile else PushoverClient()

    if not options.message:
        parser.error("Can't do anything without a message now can I?")
    # Pushover limits message length; hard-truncate to 512 characters
    options.message = options.message[:512]

    try:
        client.send_message(options.message)
    except Exception as e:
        logger.critical("Something went wrong: {0}".format(e))
def main():
    """Compute precision/recall values for an objects_detection recording.

    Validates the -i/-o options, then delegates to get_precision_recall.
    """
    parser = OptionParser()
    parser.description = \
        """ Reads the recordings of objects_detection over Caltech version of INRIA dataset, generates an output file containing the values for precision/recall """

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the recording directory")
    parser.add_option("-o", "--output", dest="output_file",
                      metavar="FILE", type="string",
                      help="output file containing precision recall values")
    options, args = parser.parse_args()

    # the input option is mandatory and must be an existing directory
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        parser.error("Could not find the input directory")
    if not os.path.isdir(options.input_path):
        parser.error("the 'input' option should point towards "
                     "the recording directory of the objects_detection application")

    get_precision_recall(options.input_path, options.output_file)
    return
def parse_arguments():
    """Check -i (existing file) and -o (non existing directory) options."""
    parser = OptionParser()
    parser.description = \
        "This program takes a detections.data_sequence created by ./objects_detection and converts it into the Caltech dataset evaluation format"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the .data_sequence file")
    parser.add_option("-o", "--output", dest="output_path",
                      metavar="DIRECTORY", type="string",
                      help="path to a non existing directory where the caltech .txt files will be created")
    options, args = parser.parse_args()

    # the input file is mandatory and must exist
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        parser.error("Could not find the input file")

    # the output directory is mandatory and must not exist yet
    if not options.output_path:
        parser.error("'output' option is required to run this program")
    if os.path.exists(options.output_path):
        parser.error("output_path should point to a non existing directory")

    return options
def parse_arguments():
    """Parse tracker options; -i must name an existing MILTrack video.

    The --gui/--save defaults come from the module-level ``gui`` and
    ``save`` settings.
    """
    parser = OptionParser()
    parser.description = \
        "This program will track objects " \
        "on videos in the MILTrack paper format. " \
        "See http://goo.gl/pSTo9r"

    parser.add_option("-i", "--input", dest="video_path",
                      metavar="PATH", type="string", default=None,
                      help="path to a folder o a MILTrack video")
    parser.add_option("--gui", dest="store_gui", default=gui,
                      help="turn gui and and off")
    parser.add_option("--save", dest="store_save", default=save,
                      help="turn on saving tracks")
    options, args = parser.parse_args()

    # the video folder is mandatory and must exist
    if not options.video_path:
        parser.error("'input' option is required to run this program")
    elif not os.path.exists(options.video_path):
        parser.error("Could not find the input file %s" % options.video_path)

    return options
def parse_arguments():
    """Parse -i (result file) or -d (results directory); one is required."""
    parser = OptionParser()
    parser.description = \
        "This program takes a detections.data_sequence created by ./objects_detection and converts it into the Caltech dataset evaluation format"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the installation result file")
    parser.add_option("-d", "--directory", dest="input_directory",
                      metavar="Directory", type="string",
                      help="directory to the installation result files")
    options, args = parser.parse_args()

    # -i takes precedence over -d; only the chosen path is checked
    if options.input_path:
        missing = not os.path.exists(options.input_path)
        message = "Could not find the input file"
    elif options.input_directory:
        missing = not os.path.exists(options.input_directory)
        message = "Could not find the input directory"
    else:
        parser.error("'input' or 'diretory' option is required to run this program")
    if missing:
        parser.error(message)

    return options
def main():
    """Read a trained single-scale detector model file and print it."""
    parser = OptionParser()
    parser.description = \
        "Reads a trained detector model and prints its content"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the model file")
    (options, args) = parser.parse_args()
    #print (options, args)

    if options.input_path:
        if not os.path.exists(options.input_path):
            parser.error("Could not find the input file")
    else:
        parser.error("'input' option is required to run this program")

    model_filename = options.input_path
    model = read_model(model_filename)

    if type(model) is detector_model_pb2.MultiScalesDetectorModel:
        # bug fix: the original raised a plain string, which is itself a
        # TypeError at runtime on python 3; raise a proper exception
        raise ValueError("only for non multiscale models")
    else:
        # assume single scale model
        print_detector_model(model)
    return
def parse_args():
    """Parse the warning/critical memory-usage thresholds.

    Returns the options object with float ``warn`` (default 85.0) and
    ``crit`` (default 95.0) percentages.
    """
    parser = OptionParser()
    parser.description = "Check Linux Host Memory Usage"
    parser.add_option("-W", "--warn", dest="warn", default=85.0,
                      help="Warning level for memory % use, default is 85%")
    parser.add_option("-C", "--crit", dest="crit", default=95.0,
                      help="Critical level for memory % use, default is 95%")
    options, args = parser.parse_args()
    return options
def parse_options():
    """Parse command line options for the favorites conversion helper.

    Output defaults go under $TEMP (or /tmp). Returns (opts, args)
    without further validation.
    """
    import version
    # bug fix: dict.has_key() no longer exists on python 3;
    # os.environ.get() preserves the TEMP-if-set-else-/tmp behaviour
    tmp = os.environ.get('TEMP') or '/tmp'
    formatter = IndentedHelpFormatter(indent_increment=2, max_help_position=32,
                                      width=100, short_first=0)
    parser = OptionParser(conflict_handler='resolve',
                          formatter=formatter,
                          usage="freevo %prog [options]",
                          version='%prog ' + str(version.version))
    parser.prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    parser.description = "Helper to convert a favorites.txt to a favorites.pickle"
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='set the level of verbosity [default:%default]')
    parser.add_option('--favorites-txt', metavar='FILE', default=config.TV_RECORD_FAVORITES_LIST,
                      help='the favorites.txt file to read and process [default:%default]')
    parser.add_option('--favorites-pickle-out', metavar='FILE', default=os.path.join(tmp, 'favorites.pickle'),
                      help='the reritten favorites.pickle file [default:%default]')
    parser.add_option('--favorites-txt-out', metavar='FILE', default=os.path.join(tmp, 'favorites.txt'),
                      help='the reritten favorites.txt file [default:%default]')
    parser.add_option('--schedule-pickle-out', metavar='FILE', default=os.path.join(tmp, 'schedule.pickle'),
                      help='the reritten schedule.pickle file [default:%default]')
    opts, args = parser.parse_args()
    return opts, args
def main():
    """List source files referenced by a Visual Studio project file.

    Reads the project given with -i, extracts <ClCompile>/<None> items
    and prints their paths subject to the suffix/filter/path options.
    Exits 1 when -i is missing, 2 when the project cannot be read.
    """
    parser = OptionParser("usage: %prog -i <input file> [options]")
    parser.description = "%prog is used to strip out docstrings from .py-files"
    # bug fix: the OptionParser attribute is 'epilog'; the original
    # assigned to 'epiolog', which optparse silently ignored
    parser.epilog = "enjoy."
    parser.add_option("-i", "--input-file", dest="input",
                      help="Reads input from FILE", metavar="FILE")
    parser.add_option("-s", "--suffix", dest="suffix",
                      help="List only file that has the suffix SUFF", metavar="SUFF")
    parser.add_option("-r", "--rel-path", dest="absPath", action="store_false",
                      help="Lists the files with a relative path", default=False)
    parser.add_option("-a", "--abs-path", dest="absPath", action="store_true",
                      help="Lists the files with an absolute path", default=False)
    parser.add_option("-q", "--qouted", dest="qouted", action="store_true",
                      help="Qoutes the output with double qoutes (i.e. \"<file>\")", default=False)
    parser.add_option("-b", "--base-path", dest="base",
                      help="If used with --rel-path this will set the base directory from where the path should be relative from.",
                      default=False)
    parser.add_option("-f", "--filter-out", dest="filter",
                      help="Remove any entries containing SUBSTR, to use several filter separate each with a ';'.",
                      metavar="SUBSTR")
    parser.add_option("-n", "--basename", dest="basename", action="store_true",
                      help="List only the basename of each file. That is; ../example_dir/example_file.cpp will only be shown as example_file.cpp.",
                      default=False)
    (options, args) = parser.parse_args()

    if not options.input:
        parser.print_help()
        sys.exit(1)

    projDir = os.path.dirname(options.input)
    try:
        mdp = xml.dom.minidom.parse(options.input)
    except IOError as e:
        print(e.args[1] + ":", e.filename)
        sys.exit(2)

    # both <ClCompile> and <None> items may reference source files
    files = mdp.getElementsByTagName("ClCompile")
    files += mdp.getElementsByTagName("None")
    for f in files:
        relp = f.getAttribute("Include").replace("\\", os.path.sep)
        if not relp:
            continue
        doPrint = True
        if options.suffix:
            doPrint = False
            if options.suffix == relp.split(".")[-1]:
                doPrint = True
        if options.filter:
            for fe in options.filter.split(';'):
                idx = relp.rfind(fe)
                # NOTE(review): the case-based condition looks like a
                # heuristic to avoid matching inside camelCase names —
                # confirm intended semantics
                if idx >= 0 and (fe[0].islower() or relp[idx+len(fe):][:1].isupper()):
                    doPrint = False
        if doPrint:
            q = "\"" if options.qouted else ""
            filepath = os.path.abspath(os.path.join(projDir, relp))
            try:
                if options.basename:
                    print(q + os.path.basename(filepath) + q)
                elif options.absPath:
                    print(q + filepath + q)
                else:
                    base = options.base if options.base else os.curdir
                    print(q + relpath(filepath, base) + q)
            except OSError as e:
                sys.stderr.write('OSError: %s\n' % str(e))
def createHfccaCommandLineParser():
    """Create and return the optparse parser for the hfcca tool.

    The returned parser carries all supported options with their
    defaults; the caller is expected to invoke parse_args() on it.
    """
    from optparse import OptionParser
    parser = OptionParser(version=VERSION)
    parser.add_option("-v", "--verbose",
                      help="Output in verbose mode (long function name)",
                      action="store_true",
                      dest="verbose",
                      default=False)
    parser.add_option("-C", "--CCN",
                      help="Threshold for cyclomatic complexity number warning. "
                           "The default value is %d. Functions with CCN bigger than this number will generate warning" % DEFAULT_CCN_THRESHOLD,
                      action="store",
                      type="int",
                      dest="CCN",
                      default=DEFAULT_CCN_THRESHOLD)
    parser.add_option("-a", "--arguments",
                      help="Limit for number of parameters",
                      action="store",
                      type="int",
                      dest="arguments",
                      default=100)
    parser.add_option("-w", "--warnings_only",
                      help="Show warnings only, using clang/gcc's warning format for printing warnings. http://clang.llvm.org/docs/UsersManual.html#cmdoption-fdiagnostics-format",
                      action="store_true",
                      dest="warnings_only",
                      default=False)
    # FIX: "otherwize" -> "otherwise" in the user-facing help text
    parser.add_option("-i", "--ignore_warnings",
                      help="If the number of warnings is equal or less than the number, the tool will exit normally, otherwise it will generate error. Useful in makefile when improving legacy code.",
                      action="store",
                      type="int",
                      dest="number",
                      default=0)
    # FIX: "characoter" -> "character" in the user-facing help text
    parser.add_option("-x", "--exclude",
                      help="Exclude files that match this pattern. * matches everything, ? matches any single character, \"./folder/*\" exclude everything in the folder, recursively. Multiple patterns can be specified. Don't forget to add \"\" around the pattern.",
                      action="append",
                      dest="exclude",
                      default=[])
    parser.add_option("-X", "--xml",
                      help="Generate XML in cppncss style instead of the normal tabular output. Useful to generate report in Jenkins server",
                      action="store_true",
                      dest="xml",
                      default=None)
    parser.add_option("-P", "--no_preprocessor_count",
                      help="By default, a #if will also increase the complexity. Adding this option to ignore them",
                      action="store_true",
                      dest="no_preprocessor_count",
                      default=False)
    parser.add_option("-t", "--working_threads",
                      help="number of working threads. The default value is 1.",
                      action="store",
                      type="int",
                      dest="working_threads",
                      default=1)
    parser.usage = "hfcca [options] [PATH or FILE] [PATH] ... "
    parser.description = __doc__
    return parser
def parse_options():
    """Parse command line options for the setup helper.

    Validates --geometry ("WIDTHxHEIGHT"), --position ("X,Y") and
    --compile (an int in 0..2); any invalid value aborts via
    parser.error().
    """
    conf = FreevoConf()
    formatter = IndentedHelpFormatter(indent_increment=2, max_help_position=32,
                                      width=100, short_first=0)
    parser = OptionParser(conflict_handler='resolve', formatter=formatter,
                          usage="""freevo %prog [options]

For more information see:
freevo %prog -- --help""",
                          version='%prog ' + str(conf.version))
    parser.prog = 'setup'
    parser.description = """Set up Freevo for your specific environment. Depending on the display and the tv standard the geometry may be automatically changed."""
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='set the level of verbosity [default:%default]')
    parser.add_option('--geometry', default=conf.geometry, metavar='GEOMETRY',
                      help='set the screen geometry as "WIDTHxHEIGHT" [default:%default]')
    parser.add_option('--position', default=conf.position, metavar='POSITION',
                      help='set the screen position as "X,Y" [default:%default]')
    parser.add_option('--display', choices=conf.displays, default=conf.display,
                      metavar='DISPLAY',
                      help='set the display [default:%default], choose from: "' +
                           '", "'.join(conf.displays) + '"')
    parser.add_option('--tv', choices=conf.tv_norms, default=conf.tv,
                      metavar='STANDARD',
                      help='set the TV standard [default:%default], choose from: "' +
                           '", "'.join(conf.tv_norms) + '"')
    parser.add_option('--chanlist', choices=conf.chanlists, default=conf.chanlist,
                      metavar='LIST',
                      help='set the channel list [default:%default], choose from: "' +
                           '", "'.join(conf.chanlists) + '"')
    parser.add_option('--sysfirst', action='store_true', default=False,
                      help='search for programs from system path first [default:%default]')
    parser.add_option('--compile', action='store', default=None,
                      help='compile the modules [default:%default]')
    parser.add_option('--prefix', action='store', default='.',
                      help='destination prefix the modules [default:%default]')
    opts, args = parser.parse_args()

    # --geometry must look like "800x600"; both split and int() raise ValueError
    try:
        w, h = opts.geometry.split('x')
        w = int(w)
        h = int(h)
    except ValueError:
        parser.error('geometry %r is not "<width>x<height>"' % opts.geometry)

    # --position must look like "10,20"
    try:
        x, y = opts.position.split(',')
        x = int(x)
        y = int(y)
    except ValueError:
        # BUG FIX: previously reported opts.geometry and the geometry-style
        # "<x>x<y>" hint; the option actually uses a comma separator.
        parser.error('position %r is not "<x>,<y>"' % opts.position)

    # --compile, when given, must be an integer between 0 and 2
    if opts.compile is not None:
        try:
            c = int(opts.compile)
            if c < 0 or c > 2:
                raise ValueError('value is < 0 or > 2')
        except ValueError as why:
            # BUG FIX: the format string has two placeholders but was fed a
            # single (and wrong) value, opts.geometry, which would raise a
            # TypeError instead of showing the intended error message.
            # Also modernized Py2-only "except ValueError, why" syntax.
            parser.error('compile %r: %s' % (opts.compile, why))
recovers.append(rec) for r in recovers: r.wait() ret &= r.succeeded() rs += r.stats rs.apply(stats) stats.timer('recover', 'finished') return ret if __name__ == '__main__': from elliptics_recovery.ctx import Ctx from optparse import OptionParser parser = OptionParser() parser.usage = "%prog [options]" parser.description = __doc__ parser.add_option("-i", "--merged-filename", dest="merged_filename", default='merged_result', metavar="FILE", help="Input file which contains information about keys " "in groups [default: %default]") parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False, help="Enable debug output [default: %default]") parser.add_option("-D", "--dir",
# save split list train_list_txt = os.path.join(dir,'train_list.txt') val_list_txt = os.path.join(dir,'val_list.txt') with open(train_list_txt, 'w') as t_obj: t_obj.writelines(train_list) with open(val_list_txt, 'w') as v_obj: v_obj.writelines(val_list) if __name__ == "__main__": usage = "usage: %prog [options] image_list " parser = OptionParser(usage=usage, version="1.0 2018-11-17") parser.description = 'Introduction: split images to subset of train and test ' parser.add_option('-t','--train_per', action='store',dest='train_per',type='float',default=0.8, help="percentage of training data, a float value from 0 to 1") parser.add_option('-s','--shuffle', action='store_true',dest='Do_shuffle',default=False, help="shuffle before splitting") (options, args) = parser.parse_args() if len(sys.argv) < 2 or len(args) < 1: parser.print_help() sys.exit(2)
parser.add_option( "-d", "--db-unique-name", dest="dbuname", help= "Gebruik deze optie om UDM voor een specifieke standby database de UDM aan te zetten. Let op: de parameterwaarde moet db_unique_name zijn en niet instance of db naam. VB: D105T_2", default=False) parser.add_option( "-p", "--path", dest="pathagentxml", help= "Gebruik deze optie om de locatie van agent configuratie xml te bepalen. Default locatie is /u01/app/oracle/product/11.1.0/agent11g/sysman/emd/collection", default='/u01/app/oracle/product/11.1.0/agent11g/sysman/emd/collection' ) parser.description = '''Gebruik dit script om User defined metric (UDM_FRA_USAGE en UDM_DG_APPLY_LAG) op de standby database aan te zetten. VB: -- Pas alleen database D105T, D105T_2 is heeft standby role, uitvoeren op de standby server %prog -d D105T_2 -- Pas voor alle standby db targets aan, uitvoeren op de standby server %prog ''' (options, args) = parser.parse_args() configpath = options.pathagentxml if path.isdir(configpath): os.chdir(configpath) print 'Oracle agent database target config path: ' + os.getcwd() import shutil import glob
def main():
    """
    Main program: parse options, resolve the fortune search path and run
    the selected mode (index update, fortune-file listing, pattern
    filtering, or printing a random fortune).
    """
    class MyIndentedHelpFormatter(IndentedHelpFormatter):
        """ Slightly modified formatter for help output: allow paragraphs """

        def format_paragraphs(self, text):
            """ wrap text per paragraph """
            result = ""
            for paragraph in text.split("\n"):
                result += self._format_text(paragraph) + "\n"
            return result

        def format_description(self, description):
            """ format description, honoring paragraphs """
            if description:
                return self.format_paragraphs(description) + "\n"
            else:
                return ""

        def format_epilog(self, epilog):
            """ format epilog, honoring paragraphs """
            if epilog:
                return "\n" + self.format_paragraphs(epilog) + "\n"
            else:
                return ""

    usage = 'Usage: %s [OPTIONS] fortune_path' % os.path.basename(sys.argv[0])
    arg_parser = OptionParser(usage=usage, formatter=MyIndentedHelpFormatter())
    arg_parser.description = "Print a random, hopefully interesting, adage " \
        "(\"fortune\"), selected from a collection of fortune files found " \
        "in fortune_path.\n\n" \
        "%s " % str(os.path.basename(sys.argv[0])) \
        + 'is an extended implementation of ' \
        'the classic BSD Unix fortune command. It combines the capabilities ' \
        'of the strfile command (which produces the fortune index file) and ' \
        'the fortune command (which displays a random fortune). It reads ' \
        'the traditional fortune program\'s text file format. ' \
        "For more information about the fortune files, and the accompanying " \
        "fortune index files, see below."
    arg_parser.add_option('-u', '--update', action='store_true', dest='update',
                          help='Update the index files, instead of printing a '
                               'fortune. You must run this before you will be '
                               'able to print fortunes from the fortune files. '
                               'This option serves the same purpose as the '
                               'strfile utility for the traditional BSD '
                               'fortune command. Note that the generated '
                               'index files are not compatible with the format '
                               'of the traditional index files. The generated '
                               'index files have the %s extension.' % INDEX_EXT)
    arg_parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
                          help="When updating the index file, don't emit "
                               "messages.")
    arg_parser.add_option('-a', '--all', action='store_true', dest='use_all',
                          help="Choose from all fortune files, including "
                               "offensive ones. Don't complain if you are offended!")
    arg_parser.add_option('-o', '--offensive', action='store_true',
                          dest='offensive',
                          help="Choose only from offensive fortunes. "
                               "Offensive fortunes are those stored in files "
                               "with filenames ending in '-o'. Make absolutely "
                               "sure that you want to be offended!")
    arg_parser.add_option('-e', '--equal', action='store_true',
                          dest='equal_size',
                          help="Consider all fortune files to be of equal "
                               "size, making it equally likely for a "
                               "fortune to be chosen from any fortune file")
    arg_parser.add_option('-f', '--fortunefiles', action='store_true',
                          dest='list_fortunefiles',
                          help="Print out the list of files which would be "
                               "searched, but don't print a fortune. ")
    arg_parser.add_option('-l', '--long', action='store_true', dest='use_long',
                          help="Show only long fortunes. See -n on how "
                               "''long'' is defined in this sense.")
    arg_parser.add_option('-w', '--wait', action='store', type=int,
                          dest='seconds_to_wait',
                          help="Wait before termination for an amount of time "
                               "calculated from the number of characters in "
                               "the message. This is useful if it is executed "
                               "as part of the logout procedure to guarantee "
                               "that the message can be read before the screen "
                               "is cleared.")
    arg_parser.add_option('-m', '--filter', action='store', dest='pattern',
                          help="Print out all fortunes which match the "
                               "regular expression pattern.\n"
                               "The fortunes are printed to standard output, "
                               "while the names of the file from which each "
                               "fortune comes are printed to standard "
                               "error. Either or both can be redirected; "
                               "if standard output is redirected to a file, "
                               "the result is a valid fortunes database "
                               "file. If standard error is also redirected "
                               "to this file, the result is still valid, "
                               "but there will be ''bogus'' fortunes, i.e. "
                               "the filenames themselves, in parentheses.\n"
                               "You may combine this option with -o, -l, "
                               "-s, -n, -i")
    arg_parser.add_option('-i', '--ignorecase', action='store_true',
                          dest='ignorecase',
                          help="Ignore case for -m patterns.")
    arg_parser.add_option('-s', '--short', action='store_true',
                          dest='use_short',
                          help="Show only short fortunes. See -n on how "
                               "''short'' is defined in this sense.")
    arg_parser.add_option('-n', action='store', dest='max_shortlength',
                          help="Set the longest fortune length (in "
                               "characters) considered to be ''short'' "
                               "(the default is %s)" % DEFAULT_LENGTH)
    arg_parser.epilog = 'If <fortune_path> is omitted, fortune looks at ' \
        'the FORTUNE_PATH environment variable for the paths. Different ' \
        'paths in FORTUNE_PATH are separated by \':\'.\n' \
        'An individual item inside the fortune_path can be a direct fortune' \
        'file, or a folder, in which case all fortune files inside the ' \
        'folder will be used. Any item may be preceded by a percentage, ' \
        'which is a number N between 0 and 99 inclusive, followed by a %. ' \
        'If it is, there will be a N percent probability that a fortune ' \
        'will be picked from that file or directory. For items for which ' \
        'there is a percentage, the probability of a fortune being selected ' \
        'from any one of them is based on the relative number of fortunes ' \
        'it contains.\n\n' \
        'The format of each fortune file is simple: All the fortunes appear ' \
        'in clear text, separated by a single line containing only a ' \
        '\'%\'. For example, the following is a fortune file containing two ' \
        'fortunes:\n\n' \
        ' 186,282 miles per second:\n\n' \
        ' It isn\'t just a good idea, it\'s the law!\n' \
        ' %\n' \
        ' A bird in the hand makes it awfully hard to blow your nose.\n\n' \
        'Before a fortune file can be used, you must generate an index ' \
        'file for it. This is a binary file that is used to select ' \
        'fortunes with more speed and efficiency.\n\n' \
        'For more background information about the fortune utility ' \
        'look at http://en.wikipedia.org/wiki/Fortune_(Unix)'

    # parse_args(sys.argv) keeps the program name in args[0], so real
    # positional arguments start at args[1]
    options, args = arg_parser.parse_args(sys.argv)
    if len(args) >= 2:
        fortunepaths = args[1:]
    else:
        # no path given on the command line: fall back to FORTUNE_PATH
        try:
            fortunepaths = os.environ['FORTUNE_PATH'].split(':')
        except KeyError:
            print("Missing fortune files", file=sys.stderr)
            print("Try %s --help" % os.path.basename(sys.argv[0]),
                  file=sys.stderr)
            sys.exit(1)

    # 'offensive' is tri-state: None = no restriction (-a),
    # True = only offensive files (-o), False = exclude offensive files
    if options.use_all:
        offensive = None
    elif options.offensive:
        offensive = True
    else:
        offensive = False

    # translate -s / -l / -n into a [minlength, maxlength] window,
    # where None means "no bound"
    if options.use_short:
        minlength = 0
        maxlength = DEFAULT_LENGTH
        if not options.max_shortlength is None:
            maxlength = int(options.max_shortlength)
    elif options.use_long:
        minlength = DEFAULT_LENGTH
        if not options.max_shortlength is None:
            minlength = int(options.max_shortlength)
        maxlength = None
    else:
        minlength = 0
        maxlength = None

    try:
        # Update Mode
        if options.update:
            make_fortune_data_file(fortunepaths)

        # Listing Fortune Files Mode
        elif options.list_fortunefiles:
            percentages, fortune_files = fortune_files_from_paths(fortunepaths,
                                                                  offensive)
            for filename in fortune_files:
                print(filename)

        # Filtering Mode
        elif not options.pattern is None:
            filter_fortunes(fortunepaths, options.pattern,
                            ignorecase=options.ignorecase,
                            offensive=offensive)

        # Printing Fortunes Mode
        else:
            sys.stdout.write(get_random_fortune(fortunepaths,
                                                offensive=offensive,
                                                weighted=(not options.equal_size),
                                                min_length=minlength,
                                                max_length=maxlength))

    except ValueError as msg:
        print(msg, file=sys.stderr)
        sys.exit(1)

    # optionally give the user time to read the message before the screen
    # is cleared (see --wait)
    if not options.seconds_to_wait is None:
        sleep(options.seconds_to_wait)

    sys.exit(0)
default=False, dest="registers") parser.add_option("-t", "--type", help="invoke the typer", action="store_true", default=False, dest="type") parser.add_option("-v", "--verbose", help="be verbose", action="store_true", default=False, dest="verbose") parser.usage = """%prog [options] [file]""" parser.description = "Compile a Tiger program (or standard input)" (options, args) = parser.parse_args() options.liveness |= options.registers options.gen |= options.liveness options.canon |= options.gen if options.irvm and options.gen: print("Error: IRVM cannot be selected for code generation", file=sys.stderr) sys.exit(1) options.irvm &= not options.gen options.ir |= options.canon | options.irvm options.type |= options.ir if len(args) > 1 or (options.expression and len(args) > 0): parser.print_help(file=sys.stderr)
def parse_args():
    """Parse command line arguments into a Ctx for the iterator tool.

    Returns a filled Ctx instance; raises ValueError for any invalid
    or missing argument.
    """
    from optparse import OptionParser
    ctx = Ctx()

    parser = OptionParser()
    parser.usage = "%prog type [options]"
    parser.description = __doc__
    parser.add_option("-g", "--groups", action="store", dest="groups",
                      default=None,
                      help="Comma separated list of groups [default: all]")
    parser.add_option("-l", "--log", dest="log", default='/dev/stderr',
                      metavar="FILE",
                      help="Output log messages from library to file [default: %default]")
    parser.add_option("-L", "--log-level", action="store", dest="log_level",
                      default="1",
                      help="Elliptics client verbosity [default: %default]")
    parser.add_option("-r", "--remote", action="append", dest="remote",
                      help="Elliptics node address [default: %default]")
    parser.add_option("-d", "--data", action="store_true", dest="data",
                      default=False,
                      help="Requests object's data with other info [default: %default]")
    parser.add_option("-k", "--key-begin", action="store", dest="key_begin",
                      default="0",
                      help="Begin key of range for iterating")
    parser.add_option("-K", "--key-end", action="store", dest="key_end",
                      default="-1",
                      help="End key of range for iterating")
    parser.add_option("-t", "--time-begin", action="store", dest="time_begin",
                      default=None,
                      help="Begin timestamp of time range for iterating")
    parser.add_option("-T", "--time-end", action="store", dest="time_end",
                      default=None,
                      help="End timestamp of time range for iterating")
    (options, args) = parser.parse_args()

    # exactly one positional argument: the iteration mode
    if len(args) > 1:
        raise ValueError("Too many arguments passed: {0}, expected: 1"
                         .format(len(args)))
    elif len(args) == 0:
        raise ValueError("Please specify one of following modes: {0}"
                         .format(ALLOWED_MODES))
    if args[0].lower() not in ALLOWED_MODES:
        raise ValueError("Unknown mode: '{0}', allowed: {1}"
                         .format(args[0], ALLOWED_MODES))
    ctx.iterate_mode = args[0].lower()

    try:
        if options.groups:
            # BUG FIX: wrap map() in list() — under Python 3 map() is lazy,
            # so ctx.groups would print as '<map object ...>' and be
            # exhausted after one iteration.
            ctx.groups = list(map(int, options.groups.split(',')))
        else:
            ctx.groups = []
    except Exception as e:
        raise ValueError("Can't parse grouplist: '{0}': {1}".format(
            options.groups, repr(e)))
    print("Using group list: {0}".format(ctx.groups))

    try:
        ctx.log_file = options.log
        ctx.log_level = int(options.log_level)
    except Exception as e:
        raise ValueError("Can't parse log_level: '{0}': {1}"
                         .format(options.log_level, repr(e)))
    print("Using elliptics client log level: {0}".format(ctx.log_level))

    if not options.remote:
        raise ValueError("Please specify at least one remote address (-r option)")
    try:
        ctx.remotes = []
        for r in options.remote:
            ctx.remotes.append(elliptics.Address.from_host_port_family(r))
            print("Using remote host:port:family: {0}".format(ctx.remotes[-1]))
    except Exception as e:
        raise ValueError("Can't parse host:port:family: '{0}': {1}"
                         .format(options.remote, repr(e)))

    # BUG FIX (this and the three handlers below): the original handlers
    # were bare 'except Exception:' yet formatted 'e' in the message,
    # raising NameError and hiding the real error; two of them also read
    # the nonexistent attribute options.timestamp instead of
    # options.time_begin / options.time_end.
    try:
        if options.time_begin:
            ctx.time_begin = Time.from_epoch(options.time_begin)
        else:
            ctx.time_begin = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}"
                         .format(options.time_begin, repr(e)))
    print("Using time_begin: {0}".format(ctx.time_begin))

    try:
        if options.time_end:
            ctx.time_end = Time.from_epoch(options.time_end)
        else:
            ctx.time_end = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}"
                         .format(options.time_end, repr(e)))
    print("Using time_end: {0}".format(ctx.time_end))

    ctx.data = options.data

    # build the single iteration key range; '-1' means "max key"
    key_range = elliptics.IteratorRange()
    try:
        if options.key_begin == '-1':
            key_range.key_begin = elliptics.Id([255] * 64, 0)
        elif options.key_begin:
            key_range.key_begin = elliptics.Id(transf(options.key_begin), 0)
        else:
            key_range.key_begin = elliptics.Id([0] * 64, 0)
    except Exception as e:
        raise ValueError("Can't parse key_begin: '{0}': {1}"
                         .format(options.key_begin, repr(e)))
    try:
        if options.key_end == '-1':
            key_range.key_end = elliptics.Id([255] * 64, 0)
        elif options.key_end:
            key_range.key_end = elliptics.Id(transf(options.key_end), 0)
        else:
            key_range.key_end = elliptics.Id([255] * 64, 0)
    except Exception as e:
        raise ValueError("Can't parse key_end: '{0}': {1}"
                         .format(options.key_end, repr(e)))
    ctx.ranges = [key_range]

    return ctx
crop_dem_list = crop_to_same_exent_for_diff( dem_list, save_dir, idx, ext_poly, process_num) dem_list = crop_dem_list dem_diff_newest_oldest(dem_list, save_dem_diff, save_date_diff, process_num, b_max_subsidence=options.max_subsidence) if __name__ == '__main__': usage = "usage: %prog [options] dem_tif_dir or dem_list_txt " parser = OptionParser(usage=usage, version="1.0 2020-12-26") parser.description = 'Introduction: difference for multi-temporal DEM ' parser.add_option("-d", "--save_dir", action="store", dest="save_dir", default='./', help="the folder to save pre-processed results") parser.add_option("", "--process_num", action="store", dest="process_num", type=int, default=4, help="number of processes to create the mosaic")
def main(options, args):
    """Remove polygons from a shapefile based on attribute values.

    args[0] is the input shapefile; when -o is not given the output path
    defaults to the input name with a '_removed' tail.
    """
    polygons_shp = args[0]
    output = options.output
    if output is None:
        # derive the default output name from the input shapefile
        output = io_function.get_name_by_adding_tail(polygons_shp, 'removed')
    para_file = options.para_file
    remove_polygons_main(polygons_shp, output, para_file)


if __name__ == "__main__":
    usage = "usage: %prog [options] shp_file"
    parser = OptionParser(usage=usage, version="1.0 2019-1-4")
    parser.description = 'Introduction: remove polygons based on an attributes values'
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      # default='save_polygon.shp',
                      help="save file path")
    parser.add_option("-p", "--para_file",
                      action="store", dest="para_file",
                      help="the parameters file")
    (options, args) = parser.parse_args()
#!/usr/bin/env python # Create a SCRIP file from an MPAS mesh. # See for details: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_5_2_0rp1/ESMF_refdoc/node3.html#SECTION03024000000000000000 import sys import netCDF4 import numpy as np from optparse import OptionParser print( "== Gathering information. (Invoke with --help for more details. All arguments are optional)" ) parser = OptionParser() parser.description = "This script takes an MPAS grid file and generates a SCRIP grid file." parser.add_option("-m", "--mpas", dest="mpasFile", help="MPAS grid file name used as input.", default="grid.nc", metavar="FILENAME") parser.add_option("-s", "--scrip", dest="scripFile", help="SCRIP grid file to output.", default="scrip.nc", metavar="FILENAME") parser.add_option( "-l", "--landice", dest="landiceMasks",
def parse_arguments(args):
    """
    Create the gcovr option parser and parse *args*.

    Returns the (options, args) tuple produced by optparse.
    """
    parser = OptionParser(option_class=PercentageOption)
    parser.add_option("--version",
                      help="Print the version number, then exit",
                      action="store_true",
                      dest="version",
                      default=False)
    parser.add_option("-v", "--verbose",
                      help="Print progress messages",
                      action="store_true",
                      dest="verbose",
                      default=False)
    parser.add_option(
        '--object-directory',
        help="Specify the directory that contains the gcov data files. gcovr "
             "must be able to identify the path between the *.gcda files and the "
             "directory where gcc was originally run. Normally, gcovr can guess "
             "correctly. This option overrides gcovr's normal path detection and "
             "can specify either the path from gcc to the gcda file (i.e. what "
             "was passed to gcc's '-o' option), or the path from the gcda file to "
             "gcc's original working directory.",
        action="store",
        dest="objdir",
        default=None)
    parser.add_option("-o", "--output",
                      help="Print output to this filename",
                      action="store",
                      dest="output",
                      default=None)
    parser.add_option(
        "-k", "--keep",
        help="Keep the temporary *.gcov files generated by gcov. "
             "By default, these are deleted.",
        action="store_true",
        dest="keep",
        default=False)
    # FIX: "users's" -> "user's" in the user-facing help text
    parser.add_option(
        "-d", "--delete",
        help="Delete the coverage files after they are processed. "
             "These are generated by the user's program, and by default gcovr "
             "does not remove these files.",
        action="store_true",
        dest="delete",
        default=False)
    parser.add_option(
        "-f", "--filter",
        help="Keep only the data files that match this regular expression",
        action="append",
        dest="filter",
        default=[])
    parser.add_option(
        "-e", "--exclude",
        help="Exclude data files that match this regular expression",
        action="append",
        dest="exclude",
        default=[])
    parser.add_option(
        "--gcov-filter",
        help="Keep only gcov data files that match this regular expression",
        action="store",
        dest="gcov_filter",
        default=None)
    parser.add_option(
        "--gcov-exclude",
        help="Exclude gcov data files that match this regular expression",
        action="append",
        dest="gcov_exclude",
        default=[])
    parser.add_option(
        "-r", "--root",
        help="Defines the root directory for source files. "
             "This is also used to filter the files, and to standardize "
             "the output.",
        action="store",
        dest="root",
        default='.')
    parser.add_option(
        "-x", "--xml",
        help="Generate XML instead of the normal tabular output.",
        action="store_true",
        dest="xml",
        default=False)
    parser.add_option(
        "--xml-pretty",
        help="Generate pretty XML instead of the normal dense format.",
        action="store_true",
        dest="prettyxml",
        default=False)
    parser.add_option(
        "--html",
        help="Generate HTML instead of the normal tabular output.",
        action="store_true",
        dest="html",
        default=False)
    parser.add_option("--html-details",
                      help="Generate HTML output for source file coverage.",
                      action="store_true",
                      dest="html_details",
                      default=False)
    parser.add_option(
        "--html-absolute-paths",
        help="Set the paths in the HTML report to be absolute instead "
             "of relative",
        action="store_false",
        dest="relative_anchors",
        default=True)
    parser.add_option('--html-encoding',
                      help='HTML file encoding (default: UTF-8).',
                      action='store',
                      dest='html_encoding',
                      default='UTF-8')
    parser.add_option(
        "-b", "--branches",
        help="Tabulate the branch coverage instead of the line coverage.",
        action="store_true",
        dest="show_branch",
        default=None)
    parser.add_option(
        "-u", "--sort-uncovered",
        help="Sort entries by increasing number of uncovered lines.",
        action="store_true",
        dest="sort_uncovered",
        default=None)
    parser.add_option(
        "-p", "--sort-percentage",
        help="Sort entries by decreasing percentage of covered lines.",
        action="store_true",
        dest="sort_percent",
        default=None)
    parser.add_option(
        "--gcov-executable",
        help="Defines the name/path to the gcov executable [defaults to the "
             "GCOV environment variable, if present; else 'gcov'].",
        action="store",
        dest="gcov_cmd",
        default=os.environ.get('GCOV', 'gcov'))
    parser.add_option(
        "--exclude-unreachable-branches",
        help="Exclude from coverage branches which are marked to be excluded "
             "by LCOV/GCOV markers or are determined to be from lines "
             "containing only compiler-generated \"dead\" code.",
        action="store_true",
        dest="exclude_unreachable_branches",
        default=False)
    parser.add_option(
        "--exclude-directories",
        help="Exclude directories from search path that match this regular expression",
        action="append",
        dest="exclude_dirs",
        default=[])
    parser.add_option("-g", "--use-gcov-files",
                      help="Use preprocessed gcov files for analysis.",
                      action="store_true",
                      dest="gcov_files",
                      default=False)
    parser.add_option(
        "-s", "--print-summary",
        help="Prints a small report to stdout with line & branch "
             "percentage coverage",
        action="store_true",
        dest="print_summary",
        default=False)
    parser.add_option(
        "--fail-under-line",
        type="percentage",
        metavar="MIN",
        help="Exit with a status of 2 if the total line coverage is less "
             "than MIN. "
             "Can be ORed with exit status of '--fail-under-branch' option",
        action="store",
        dest="fail_under_line",
        default=0.0)
    parser.add_option(
        "--fail-under-branch",
        type="percentage",
        metavar="MIN",
        help="Exit with a status of 4 if the total branch coverage is less "
             "than MIN. "
             "Can be ORed with exit status of '--fail-under-line' option",
        action="store",
        dest="fail_under_branch",
        default=0.0)
    parser.usage = "gcovr [options]"
    parser.description = \
        "A utility to run gcov and generate a simple report that summarizes " \
        "the coverage"
    return parser.parse_args(args=args)
print('unknow task name: %s' % task_name) pass time.sleep(10) # wait all local task finished while basic.b_all_process_finish(local_tasks) is False: print(datetime.now(), 'wait 5 minutes to let all local tasks to complete') time.sleep(60 * 5) if __name__ == '__main__': usage = "usage: %prog [options] task_name (dem_diff, segment, dem_headwall) " parser = OptionParser(usage=usage, version="1.0 2021-4-25") parser.description = 'Introduction: parallel processing DEM on CURC ' parser.add_option("-j", "--max_job_count", action="store", dest="max_job_count", type=int, default=50, help="number of jobs to submit at the same time") parser.add_option("-n", "--n_tif_per_job", action="store", dest="n_tif_per_job", type=int, default=10,
# # This program is dual licensed under the terms of the MIT license # or the GNU General Public License (GPL) version 3. # A copy of both licenses is provided in the doc/ folder of the # official release of Sozi. # # See http://sozi.baierouge.fr/wiki/en:license for details. from optparse import OptionParser from lxml import etree import subprocess, shutil, sys if __name__ == '__main__': option_parser = OptionParser() option_parser.description = "Convert all texts to paths" option_parser.usage = "texts2paths.py [options] input_file.svg" option_parser.add_option("-o", "--output", type="string", dest="output", help="The target SVG file name") options, args = option_parser.parse_args() if len(args) == 0: option_parser.print_usage(sys.stderr) sys.exit() # Set input and output file name
from optparse import OptionParser

# Command line interface for exactQ.py: two positional arguments,
# the grid size MX and the run duration in years.
parser = OptionParser()
parser.usage = \
    """%prog MX DURATION

where MX is number of grid points, DURATION is time in years for run,

Example: Try this diagnostic-only run:
  $ export MX=101 YY=0
  $ ./exactQ.py $MX $YY
  $ pismr -o outQ$MX.nc -y $YY -i initQ$MX.nc -bootstrap -Mx $MX -My $MX -Mz 21 -Lz 1500 -z_spacing equal -surface given -stress_balance ssa -energy none -yield_stress constant -tauc 1e6 -ssa_dirichlet_bc -cfbc -part_grid -o_order zyx -ssa_e 1.0
"""
parser.description = "A script which runs Test Q."

(options, args) = parser.parse_args()

# BUG FIX: was '(len(args) < 2) | (len(args) > 2)' — a bitwise OR on
# booleans where a logical test was meant; 'len(args) != 2' states the
# intent (exactly two positional arguments) directly.
if len(args) != 2:
    print("ERROR; exactQ.py needs two arguments; run with --help to see usage")
    print("... EXITING")
    exit(-1)

SperA = 31556926.0  # seconds per year

Mx = int(args[0])
My = Mx
runtime = float(args[1]) * SperA  # DURATION given in years, used in seconds

ncfile = "initQ%d.nc" % Mx

# basic parameters
import pylab as plt from optparse import OptionParser __author__ = "Andy Aschwanden" # If no threshold is given, set it to 1e1 THRESHOLD = 1e1 # This is the default variable to calculate norm from X = 'enthalpybase' # Default norm is the euclidian norm, 2-norm PNORM = float(2) # Set up the option parser parser = OptionParser() parser.usage = "usage: %prog [options] FILE" parser.description = '''Check stationarity of a variable in FILE by calculating the rate of change of its p-norm. That is d/dt || X ||_{p} = (\sum_{i}^{m} (E_{i}^{n+1}-E_{i}^{n})^p)^(1/p)/(t^{n+1}-t^{n+1}), where E_{i}^{n} is the value at time n and coordinate i.''' parser.add_option("-p", "--pnorm", dest="pnorm", type='float', help="use P norm (default p = 2)", metavar="P", default=PNORM) parser.add_option("-s", "--stride", dest="stride", type="int", help="stride, plot only every stride value", default=1) parser.add_option("-t", "--threshold", dest="threshold", help="draws a line horizontal line at THRESHOLD", metavar="THRESHOLD", default=THRESHOLD) parser.add_option("-v", "--variable", dest="varname", type='string', help="calculate from from variable X (default=enthalpybase)", metavar="VAR", default=X)
snow_series = one_point_snowcover_series(img_col, img_row, xy_srs, mod_snow_file, myd_snow_file) snow_series_wholeArea.append(snow_series) save_yearly_days(mod_snow_file, snow_series_wholeArea, img_width, img_height) save_year_monthly_days(mod_snow_file, snow_series_wholeArea, img_width, img_height) if __name__ == "__main__": usage = "usage: %prog [options] msi_file1 msi_file2 ..." parser = OptionParser(usage=usage, version="1.0 2019-4-14") parser.description = 'Introduction: plot the time series of landsat data' parser.add_option("-o", "--output", action="store", dest="output", help="the output file path") # parser.add_option("-p", "--para", # action="store", dest="para_file", # help="the parameters file") (options, args) = parser.parse_args() if len(sys.argv) < 2: parser.print_help() sys.exit(2)
shp_path, burn_value, attribute_name, save_path, ignore_edge=b_burn_edge) else: xres = options.pixel_size_x yres = options.pixel_size_y rasterize_polygons(shp_path, burn_value, attribute_name, xres, yres, save_path) if __name__ == "__main__": usage = "usage: %prog [options] polygons_path" parser = OptionParser(usage=usage, version="1.0 2021-7-2") parser.description = 'Introduction: rasterize the polygons in the entire scene, without gdal_rasterize. ' \ 'The image and shape file should have the same projection.' parser.add_option( "-r", "--reference_raster", action="store", dest="reference_raster", help="a raster file as reference, should have have the same projection" ) parser.add_option("-o", "--out_dir", action="store", dest="out_dir", default='./', help="the folder path for saving output files") parser.add_option("-e",
% duration) pass def main(options, args): print(" YOLO Post-processing ") para_file = args[0] # the test string in 'exe.sh' if len(args) > 1: test_note = args[1] else: test_note = '' yolo_postProcess(para_file, test_note) if __name__ == '__main__': usage = "usage: %prog [options] para_file test_note" parser = OptionParser(usage=usage, version="1.0 2021-04-08") parser.description = 'Introduction: Post-processing of YOLO prediction results ' (options, args) = parser.parse_args() if len(sys.argv) < 2: parser.print_help() sys.exit(2) main(options, args)
def main():
    """Translate the coordinates of a planar MPAS mesh file, in place.

    Exactly one translation method must be chosen on the command line:
      1) -c: shift the origin to the center of the domain
      2) -x/-y: arbitrary user-specified shift
      3) -d: shift to match the domain center of another file

    Raises:
        ValueError: if no method, or more than one method, is selected.
    """
    print("== Gathering information. (Invoke with --help for more details. "
          "All arguments are optional)")
    parser = OptionParser()
    parser.description = \
        "This script translates the coordinate system of the planar MPAS " \
        "mesh specified with the -f flag. \n" \
        "There are 3 possible methods to choose from:\n" \
        "1) shift the origin to the center of the domain\n" \
        "2) arbirary shift in x and/or y\n" \
        "3) shift to the center of the domain described in a separate file\n"
    parser.add_option("-f", "--file", dest="fileInName",
                      help="MPAS planar grid file name.", default="grid.nc",
                      metavar="FILENAME")
    parser.add_option("-d", "--datafile", dest="dataFileName",
                      help="data file name to which to match the domain "
                           "center of. Uses xCell,yCell or, if those fields "
                           "do not exist, will secondly try x1,y1 fields.",
                      metavar="FILENAME")
    parser.add_option("-x", dest="xshift",
                      help="user-specified shift in the x-direction.",
                      type="float", default=0.0, metavar="SHIFT_VALUE")
    parser.add_option("-y", dest="yshift",
                      help="user-specified shift in the y-direction.",
                      type="float", default=0.0, metavar="SHIFT_VALUE")
    parser.add_option("-c", dest="center",
                      help="shift so origin is at center of domain",
                      action="store_true", default=False)

    # Append "[default: %default]" to every option that has a real default.
    for option in parser.option_list:
        if option.default != ("NO", "DEFAULT"):
            option.help += (" " if option.help else "") + "[default: %default]"
    options, args = parser.parse_args()

    print("Attempting to translate coordinates in file: {}".format(
        options.fileInName))

    # Enforce that exactly one translation method was requested.
    if options.dataFileName is not None and \
            (options.xshift != 0. or options.yshift != 0.):
        raise ValueError('Specifying a datafile AND one or both of x/y shift '
                         'is invalid. Please select one of those methods '
                         'only.')
    if options.center and (options.xshift != 0. or options.yshift != 0.):
        raise ValueError('Specifying a shift to center AND one or both of x/y '
                         'shift is invalid. Please select one of those '
                         'methods only.')
    if options.dataFileName is not None and options.center:
        raise ValueError('Specifying a datafile AND a shift to center is '
                         'invalid. Please select one of those methods only.')
    if not options.center and (options.xshift == 0.) and \
            (options.yshift == 0.) and options.dataFileName is None:
        raise ValueError('No translation method was specified. Please select '
                         'one. Run with -h for more information.')

    mesh = xarray.open_dataset(options.fileInName)

    if options.dataFileName is not None:
        print(" Translating coordinates in {} so the domain center matches "
              "the domain center in {}.\n\n".format(options.fileInName,
                                                    options.dataFileName))
        otherMesh = xarray.open_dataset(options.dataFileName)
        center_on_mesh(mesh, otherMesh)

    if options.xshift != 0. or options.yshift != 0.:
        print(" Translating coordinates in {} by user-specified values. "
              "X-shift={:f}; Y-shift={:f}\n\n".format(options.fileInName,
                                                      options.xshift,
                                                      options.yshift))
        translate(mesh, options.xshift, options.yshift)

    if options.center:
        # BUG FIX: the original used a "%s" placeholder with no formatting
        # argument, so a literal "%s" was printed; format the file name the
        # same way the other branches do.
        print(" Translating coordinates in {} so the origin is the center of "
              "the domain.\n\n".format(options.fileInName))
        center(mesh)

    # close the file so we can re-open it for writing
    mesh.close()
    write_netcdf(mesh, options.fileInName)

    print("Translation completed.")
def createHfccaCommandLineParser():
    """Build and return the OptionParser describing hfcca's command line.

    The CCN warning threshold defaults to the module-level
    DEFAULT_CCN_THRESHOLD, and the module docstring (__doc__) is used as the
    parser description.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-v", "--verbose",
                      help="Output in verbose mode (long function name)",
                      action="store_true", dest="verbose", default=False)
    parser.add_option("-C", "--CCN",
                      help="Threshold for cyclomatic complexity number warning. _functions with CCN bigger than this number will be shown in warning",
                      action="store", type="int", dest="CCN",
                      default=DEFAULT_CCN_THRESHOLD)
    parser.add_option("-w", "--warnings_only",
                      help="Show warnings only",
                      action="store_true", dest="warnings_only", default=False)
    # NOTE(review): "otherwize" below is a typo in a runtime help string,
    # left untouched in this documentation-only pass.
    parser.add_option("-i", "--ignore_warnings",
                      help="If the number of warnings is equal or less than the number, the tool will exit normally, otherwize it will generate error. Useful in makefile when improving legacy code.",
                      action="store", type="int", dest="number", default=0)
    parser.add_option("-x", "--exclude",
                      help="Exclude data files that match this regular expression. Multiple regular expressions can be specified.",
                      action="append", dest="exclude", default=[])
    parser.add_option("-X", "--xml",
                      help="Generate XML in cppncss style instead of the normal tabular output. Useful to generate report in Hudson server",
                      action="store_true", dest="xml", default=None)
    parser.add_option("-p", "--preprocess",
                      help="Use preprocessor, always ignore the #else branch. By default, hfcca just ignore any preprocessor statement.",
                      action="store_true", dest="use_preprocessor", default=False)
    parser.add_option("-a", "--arguments",
                      help="Limit for number of parameters",
                      action="store", type="int", dest="arguments", default=100)
    parser.add_option("-P", "--no_preprocessor_count",
                      help="By default, a #if will also increase the complexity. Adding this option to ignore them",
                      action="store_true", dest="no_preprocessor_count", default=False)
    parser.add_option("-t", "--working_threads",
                      help="number of working threads. The default value is 1.",
                      action="store", type="int", dest="working_threads", default=1)
    parser.usage = "hfcca.py [options] [PATH or FILE] [PATH] ... "
    parser.description = __doc__
    return parser
        # Tail of an __init__ defined before this excerpt: initialise the
        # underlying Transkribus REST client with server URL, proxy and
        # logging settings.
        TranskribusClient.__init__(self, sServerUrl=self.sDefaultServerUrl, proxies=sHttpProxy, loggingLevel=loggingLevel)

    def run(self, dictName, dictString):
        """Upload the dictionary content under the given name; return the
        server response from uploadDict."""
        ret = self.uploadDict(dictName, dictString)
        return ret


if __name__ == '__main__':
    version = "v.01"

    # prepare for the parsing of the command line
    parser = OptionParser(usage=usage, version=version)
    parser.description = description

    # "-s", "--server", "-l", "--login" , "-p", "--pwd", "--https_proxy" OPTIONS
    __Trnskrbs_basic_options(parser, DoHtrRnn.sDefaultServerUrl)

    parser.add_option("-d", "--dict", dest='ldict', action="append", type="string", help="list of dictionaries")

    # ---
    # parse the command line
    (options, args) = parser.parse_args()
    # Build the proxies dict only when an https proxy was given; the dict
    # literal continues beyond this excerpt.
    proxies = {} if not options.https_proxy else {
# Copyright (C) 2010-2013 Guillaume Savaton # # This program is dual licensed under the terms of the MIT license # or the GNU General Public License (GPL) version 3. # A copy of both licenses is provided in the doc/ folder of the # official release of Sozi. # # See http://sozi.baierouge.fr/wiki/en:license for details. import sys, os, tempfile, shutil, subprocess from optparse import OptionParser if __name__ == '__main__': option_parser = OptionParser() option_parser.description = "Export a Sozi presentation to a video" option_parser.usage = "sozi2video.py [options] url.svg" option_parser.add_option("-W", "--width", type="int", dest="width_px", default=1024, help="Page width, in pixels (default is 1024)") option_parser.add_option("-H", "--height", type="int", dest="height_px", default=768, help="Page height, in pixels (default is 768)")
def init_host_test_cli_params():
    """! Function creates CLI parser object and returns populated options object.
    @return Function returns 'options' object returned from OptionParser class
    @details Options object later can be used to populate host test selector script.
    """
    parser = OptionParser()

    # Target / connection selection.
    parser.add_option("-m", "--micro",
                      dest="micro",
                      help="Target microcontroller name",
                      metavar="MICRO")
    parser.add_option("-p", "--port",
                      dest="port",
                      help="Serial port of the target",
                      metavar="PORT")
    parser.add_option("-d", "--disk",
                      dest="disk",
                      help="Target disk (mount point) path",
                      metavar="DISK_PATH")
    parser.add_option("-t", "--target-id",
                      dest="target_id",
                      help="Unique Target Id or mbed platform",
                      metavar="TARGET_ID")
    parser.add_option("", "--sync",
                      dest="sync_behavior",
                      default=2,
                      type=int,
                      help="Define how many times __sync packet will be sent to device: 0: none; -1: forever; 1,2,3... - number of times (Default 2 time)",
                      metavar="SYNC_BEHAVIOR")
    parser.add_option("-f", "--image-path",
                      dest="image_path",
                      help="Path with target's binary image",
                      metavar="IMAGE_PATH")

    # Flash / reset method selection; the supported methods come from the
    # plugin registry and are appended to the help text.
    copy_methods_str = "Plugin support: " + ', '.join(
        host_tests_plugins.get_plugin_caps('CopyMethod'))
    parser.add_option("-c", "--copy",
                      dest="copy_method",
                      help="Copy (flash the target) method selector. " + copy_methods_str,
                      metavar="COPY_METHOD")
    reset_methods_str = "Plugin support: " + ', '.join(
        host_tests_plugins.get_plugin_caps('ResetMethod'))
    parser.add_option("-r", "--reset",
                      dest="forced_reset_type",
                      help="Forces different type of reset. " + reset_methods_str)

    # Timeouts and delays.
    parser.add_option("-C", "--program_cycle_s",
                      dest="program_cycle_s",
                      help="Program cycle sleep. Define how many seconds you want wait after copying binary onto target",
                      type="float",
                      metavar="PROGRAM_CYCLE_S")
    parser.add_option("-R", "--reset-timeout",
                      dest="forced_reset_timeout",
                      default=1,
                      metavar="NUMBER",
                      type="float",
                      help="When forcing a reset using option -r you can set up after reset idle delay in seconds (Default is 1 second)")
    parser.add_option("--process-start-timeout",
                      dest="process_start_timeout",
                      default=60,
                      metavar="NUMBER",
                      type="float",
                      help="This sets the maximum time in seconds to wait for an internal process to start. This mostly only affects machines under heavy load (Default is 60 seconds)")

    # Host-test discovery / configuration.
    parser.add_option("-e", "--enum-host-tests",
                      dest="enum_host_tests",
                      help="Define directory with local host tests")
    parser.add_option('', '--test-cfg',
                      dest='json_test_configuration',
                      help='Pass to host test class data about host test configuration')
    parser.add_option('', '--list',
                      dest='list_reg_hts',
                      default=False,
                      action="store_true",
                      help='Prints registered host test and exits')
    parser.add_option('', '--plugins',
                      dest='list_plugins',
                      default=False,
                      action="store_true",
                      help='Prints registered plugins and exits')
    parser.add_option('-g', '--grm',
                      dest='global_resource_mgr',
                      help='[Experimental] Global resource manager service module name, IP and port, example remote_client:10.2.123.43:3334')

    # Workflow switches.
    parser.add_option('', '--run',
                      dest='run_binary',
                      default=False,
                      action="store_true",
                      help='Runs binary image on target (workflow: flash, reset, output console)')
    parser.add_option('', '--skip-flashing',
                      dest='skip_flashing',
                      default=False,
                      action="store_true",
                      help='Skips use of copy/flash plugin. Note: target will not be reflashed')
    parser.add_option('', '--skip-reset',
                      dest='skip_reset',
                      default=False,
                      action="store_true",
                      help='Skips use of reset plugin. Note: target will not be reset')
    parser.add_option('-P', '--polling-timeout',
                      dest='polling_timeout',
                      default=60,
                      metavar="NUMBER",
                      type="int",
                      help='Timeout in sec for readiness of mount point and serial port of local or remote device. Default 60 sec')
    parser.add_option('-b', '--send-break',
                      dest='send_break_cmd',
                      default=False,
                      action="store_true",
                      help='Send reset signal to board on specified port (-p PORT) and print serial output. You can combine this with (-r RESET_TYPE) switch')
    parser.add_option('', '--baud-rate',
                      dest='baud_rate',
                      help="Baud rate of target, overrides values from mbed-ls, disk/mount point (-d, --disk-path), and serial port -p <port>:<baud rate>",
                      metavar="BAUD_RATE")

    # Output / diagnostics.
    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='More verbose mode')
    parser.add_option('', '--serial-output-file',
                      dest='serial_output_file',
                      default=None,
                      help='Save target serial output to this file.')
    parser.add_option('', '--compare-log',
                      dest='compare_log',
                      default=None,
                      help='Log file to compare with the serial output from target.')
    parser.add_option('', '--version',
                      dest='version',
                      default=False,
                      action="store_true",
                      help='Prints package version and exits')

    parser.description = """Flash, reset and perform host supervised tests on mbed platforms"""
    parser.epilog = """Example: mbedhtrun -d E: -p COM5 -f "test.bin" -C 4 -c shell -m K64F"""

    # Positional arguments are discarded; only parsed options are returned.
    (options, _) = parser.parse_args()
    return options
    # Tail of a function defined before this excerpt: convert the accumulated
    # accuracy dictionaries to DataFrames and write both as sheets of one
    # Excel file.
    acc_table_pd = pd.DataFrame(acc_table)
    acc_table_IOU_version_pd = pd.DataFrame(acc_table_IOU_version)
    with pd.ExcelWriter(output_file) as writer:
        acc_table_pd.to_excel(writer, sheet_name='accuracy table')
        acc_table_IOU_version_pd.to_excel(writer, sheet_name='accuracy table IOU version')
    pass


if __name__ == "__main__":
    usage = "usage: %prog [options] eva_report eva_report ... "
    parser = OptionParser(usage=usage, version="1.0 2020-6-28")
    parser.description = 'Introduction: convert the evaluation reports to tables'

    parser.add_option("-o", "--output",
                      action="store", dest="output", default="accuracy_table.csv",
                      help="the output file path")

    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    main(options, args)
    # Tail of a function defined before this excerpt: pick the output path
    # (user-specified, or derived from the input name with a 'coreg' tail),
    # then run SIFT-GPU based co-registration, recording offsets to an XML
    # metadata file next to the output.
    if options.output is not None:
        output = options.output
    else:
        output = io_function.get_name_by_adding_tail(new_image, 'coreg')

    bkeepmidfile = True
    xml_path = os.path.splitext(output)[0] + '.xml'
    coreg_xml = OffsetMetaDataClass(xml_path)
    RSImageProcess.coregistration_siftGPU(ref_image, new_image, bkeepmidfile, coreg_xml)


if __name__ == "__main__":
    usage = "usage: %prog [options] ref_image new_image"
    parser = OptionParser(usage=usage, version="1.0 2019-1-29")
    parser.description = 'Introduction: co-registration of two images'
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      help="the output file path")
    parser.add_option("-p", "--para",
                      action="store", dest="para_file",
                      help="the parameters file")

    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    ## set parameters files
def train_input_parse(cls):
    """Parse command-line options for model training and return them.

    Validates that the training data file (-t/--trainPath, default
    './train.txt') exists; its path is normalized in place.  The normalized
    path is also mirrored into options.input_path for backward
    compatibility with existing callers.

    Returns:
        optparse.Values: parsed options (task_name, trainPath, valPath,
        model, pretrainModel, config_path, plus input_path).
    """
    parser = OptionParser()
    parser.description = "This program train model"

    parser.add_option("-t", "--task", dest="task_name",
                      type="string", default=None,
                      help="task name")
    parser.add_option("-i", "--trainPath", dest="trainPath",
                      metavar="PATH", type="string", default="./train.txt",
                      help="path to data config file")
    parser.add_option("-v", "--valPath", dest="valPath",
                      metavar="PATH", type="string", default=None,
                      help="path to data config file")
    parser.add_option("-m", "--model", dest="model",
                      metavar="PATH", type="string", default="cfg/cifar100.cfg",
                      help="cfg file path or model name")
    parser.add_option("-p", "--pretrainModel", dest="pretrainModel",
                      metavar="PATH", type="string", default=None,
                      help="path to store weights")
    parser.add_option("-c", "--config", dest="config_path",
                      metavar="PATH", type="string", default=None,
                      help="config path")

    (options, args) = parser.parse_args()

    if options.trainPath:
        if not os.path.exists(options.trainPath):
            parser.error("Could not find the input train file")
        else:
            # BUG FIX: the original stored the normalized path only in
            # options.input_path, leaving options.trainPath un-normalized.
            # Normalize trainPath in place and keep input_path as an alias
            # so existing callers of either attribute still work.
            options.trainPath = os.path.normpath(options.trainPath)
            options.input_path = options.trainPath
    else:
        parser.error("'trainPath' option is required to run this program")

    return options
    # Tail of a function defined before this excerpt: build a raster mask of
    # valid (non-nodata) pixels, polygonize it to a shapefile, then remove
    # the mask if it was only a temporary file.
    out_mask = options.output_mask
    valid_mask = get_valid_pixel_mask(img_path, img_nodata, out_mask=out_mask)
    # to shapefile
    raster_io.raster2shapefile(valid_mask, out_shp=output_shp, nodata=0)  # the nodata for valid_mask is 0
    if valid_mask == 'tmp.tif':
        io_function.delete_file_or_dir(valid_mask)


if __name__ == "__main__":
    usage = "usage: %prog [options] raster_path "
    parser = OptionParser(usage=usage, version="1.0 2021-11-04")
    parser.description = 'Introduction: get valid region of a raster'
    parser.add_option("-n", "--nodata",
                      action="store", dest="nodata",
                      help="the nodata value of raster")
    parser.add_option("-o", "--output_shp",
                      action="store", dest="output_shp",
                      help="the output shapefile")
    parser.add_option("-m", "--output_mask",
                      action="store", dest="output_mask",
                      help="the output raster mask of valid regions, sometime, shapefile may have multiple polygons, "
                           "it will be convenient to use the raster mask ")
def main(options, args): input = args[0] output = args[1] data_para_file = options.data_para if data_para_file is None: data_para_file = options.para_file add_polygon_attributes(input, output, options.para_file, data_para_file) if __name__ == '__main__': usage = "usage: %prog [options] input_path output_file" parser = OptionParser(usage=usage, version="1.0 2017-7-24") parser.description = 'Introduction: Post process of Polygon shape file, including ' \ 'statistic polygon information,' parser.add_option("-p", "--para", action="store", dest="para_file", help="the parameters file") parser.add_option("-d", "--data_para", action="store", dest="data_para", help="the parameters file for data") parser.add_option("-a", "--min_area", action="store",
# Shortcut for writing progress messages (stderr is imported above this
# excerpt — presumably `from sys import stderr`; confirm).
write = stderr.write

from netCDF4 import Dataset as CDF
from optparse import OptionParser

# Ice density constant — presumably kg m-3; confirm against PISM conventions.
ice_density = 910.0

__author__ = "Andy Aschwanden"

# Create PISM-readable input file from Storglaciaren DEM

parser = OptionParser()
parser.usage = "usage: %prog [options]"
parser.description = "Preprocess Storglaciaren files."
(options, args) = parser.parse_args()

# Create PISM-readable input file from Storglaciaren DEM

write('------------------------------\n')
write('PISM-Storglaciaren example\n')
write('------------------------------\n')

# data dir
data_dir = './'
# X is Northing (http://en.wikipedia.org/wiki/Swedish_grid)
XFile = data_dir + 'X.txt.gz'
# Y is Easting (http://en.wikipedia.org/wiki/Swedish_grid)
YFile = data_dir + 'Y.txt.gz'
#!/usr/bin/env python3 from PISMNC import PISMDataset as NC from optparse import OptionParser import numpy as np import subprocess import shlex import sys parser = OptionParser() parser.usage = "%prog [options]" parser.description = "Test the SSAFD solver using various geometric configurations." parser.add_option("-o", dest="output_file_prefix", default="ssafd_test", help="output file prefix") parser.add_option("-L", dest="L", type=float, default=10.0, help="horizontal domain dimensions, km") parser.add_option("-H", dest="H", type=float, default=500.0, help="ice thickness in icy areas") (options, args) = parser.parse_args() M = 3
def main():
    """Command-line driver for a simple RSA key-generation / encryption /
    decryption tool.

    Modes (chosen by option):
      -g          generate a key pair (p/q size via --pqsize, default 380);
                  writes key_rsa.pub and key_rsa as JSON
      -e FILE     encrypt the plaintext in FILE (needs -f public-key file)
      -d FILE     decrypt the cipher in FILE (needs -f public-key file and
                  the private-key file name as a positional argument)
    With no options, prints a usage hint.
    """
    # option argument parser
    parser = OptionParser()
    parser.description = "Simple command line program for RSA encryption"

    # Add command line options
    parser.add_option("-g", "--generate-keys",
                      help="Generate public and private keys optionally specify the size of p and q with --pqsize",
                      action='store_true', dest='generate', default=False)
    parser.add_option("-p", "--pqsize",
                      help="Specify the size of p and q",
                      dest='pqsize', type='int')
    parser.add_option("-e", "--encrypt",
                      dest='message_file', default=None,
                      help="encrypt message in file")
    parser.add_option("-d", "--decrypt",
                      dest='cipher_file', default=None,
                      help="decrypt message")
    parser.add_option("-f", "--loadkey/s",
                      dest='filenames',
                      help="Load key files")

    # parser args
    (options, args) = parser.parse_args()

    # rsa class
    rsa = RSA()

    # Generate keys; parse -g option and arguments
    if (options.generate == True):
        pqsize = 380
        if (options.pqsize != None):
            pqsize = options.pqsize
        print("\nGenerating keys")
        print("----------------------------")
        # Generate keys
        public, private = rsa.generateKeys(pqsize)
        # Public key to key_rsa.pub, private key to key_rsa (both JSON).
        with open("key_rsa.pub", 'w') as outfile:
            json.dump(public, outfile)
        private = {"private": private}
        with open("key_rsa", "w") as outfile:
            json.dump(private, outfile)
        # Print message
        print("----------------------------")
        print("Generated keys!")
        print("Public key stored in key_rsa.pub")
        print("Private key stored in key_rsa.", "Keep this secret!")
        print("----------------------------\n")

    # Encrypt parse -e arguments
    elif (options.message_file != None):
        if (options.filenames):
            with open(options.filenames) as json_file:
                with open(options.message_file) as plaintext:
                    # Load and read plaintext
                    m = plaintext.read()
                    public = json.load(json_file)
                    # Encrypt
                    cipher = rsa.encrypt(m, public)
                    # NOTE(review): 'cipher' is never written to disk here,
                    # although the message below claims cipher.json was
                    # generated — looks like a bug; flagging only.
                    # print message
                    print("----------------------------")
                    print("Generated file cipher.json")
                    print("----------------------------\n")
        else:
            # Print error message
            print("----------------------------")
            print("Also requires you to specifiy a file containing public key using -f")
            print("----------------------------\n")

    # Decrypt
    elif (options.cipher_file != None):
        if (options.filenames):
            filename_pub = options.filenames
            # The private-key file name is taken from the first positional
            # argument; missing it is a hard error.
            if (args):
                filename_private = args[0]
            else:
                print("----------------------------")
                print("ERROR please specify filename key private key.\n Eg. \n -f key_rsa.pub key_rsa")
                print("----------------------------\n")
                sys.exit(1)
            with open(filename_pub) as public:
                # Load keys and cipher
                public = json.load(public)
                with open(filename_private) as private:
                    with open(options.cipher_file) as ciphertext:
                        # Load cipher
                        c = json.load(ciphertext)
                        private = json.load(private)
                        # Decrypt
                        message = rsa.decrypt(c, private["private"], public)
                        # NOTE(review): the write below is commented out, so
                        # decrypted.txt is never actually created despite the
                        # message — flagging only.
                        print("Decrypted message and written to file decrypted.txt")
                        # with open("decrypted.txt", "w") as decrypted:
                        #     decrypted.write(message)
        else:
            # Print error
            print("----------------------------")
            print("-----------------------\nERROR")
            print("Specifiy filenames for public and private keys")
            print("----------------------------\n")

    # No options given
    else:
        print("run: python3 rsa.py -h \n...to see use")